text | lang | len
---|---|---|
"""
Define the base window object and the constants/"globals" used
throughout this module.
A window is a little part of the screen, for example the input window,
the text window, the roster window, etc.
A Tab (see the src/tabs module) is composed of multiple Windows
"""
import logging
log = logging.getLogger(__name__)
import collections
import curses
import string
import core
import singleton
from theming import to_curses_attr, read_tuple
FORMAT_CHAR = '\x19'
# These are non-printable chars, so they should never appear in the input,
# I guess. But maybe we can find better chars that are even less risky.
format_chars = ['\x0E', '\x0F', '\x10', '\x11', '\x12', '\x13',
'\x14', '\x15', '\x16', '\x17', '\x18']
# different colors allowed in the input
allowed_color_digits = ('0', '1', '2', '3', '4', '5', '6', '7')
# msg is a reference to the corresponding Message tuple. start_pos and
# end_pos are the positions delimiting the text of this line within the message.
Line = collections.namedtuple('Line', 'msg start_pos end_pos prepend')
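# Hypothetical example: a message wrapped over two screen rows might produce
# Line(msg=message, start_pos=0, end_pos=80, prepend='') followed by
# Line(msg=message, start_pos=80, end_pos=123, prepend='    ').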
LINES_NB_LIMIT = 4096
class DummyWin(object):
def __getattribute__(self, name):
if name != '__bool__':
return lambda *args, **kwargs: (0, 0)
else:
return object.__getattribute__(self, name)
def __bool__(self):
return False
class Win(object):
_win_core = None
_tab_win = None
def __init__(self):
self._win = None
self.height, self.width = 0, 0
def _resize(self, height, width, y, x):
if height == 0 or width == 0:
self.height, self.width = height, width
return
self.height, self.width, self.x, self.y = height, width, x, y
try:
self._win = Win._tab_win.derwin(height, width, y, x)
        except curses.error:
            log.debug('DEBUG: derwin returned ERR. Please investigate')
if self._win is None:
self._win = DummyWin()
def resize(self, height, width, y, x):
"""
Override if something has to be done on resize
"""
self._resize(height, width, y, x)
def _refresh(self):
self._win.noutrefresh()
def addnstr(self, *args):
"""
Safe call to addnstr
"""
        try:
            self._win.addnstr(*args)
        except curses.error:
            # addnstr() raises an error (it returns ERR) when the string
            # reaches the bottom-right corner of the window, even though
            # the text is displayed correctly, so we can safely ignore it.
            pass
def addstr(self, *args):
"""
Safe call to addstr
"""
try:
self._win.addstr(*args)
        except curses.error:
pass
def move(self, y, x):
try:
self._win.move(y, x)
        except curses.error:
self._win.move(0, 0)
def addstr_colored(self, text, y=None, x=None):
"""
Write a string on the window, setting the
attributes as they are in the string.
For example:
\x19bhello → hello in bold
\x191}Bonj\x192}our → 'Bonj' in red and 'our' in green
next_attr_char is the \x19 delimiter
        attr_char is the char following it; it can be
        'u' (underline), 'b' (bold), 'o' (reset), or a color spec terminated by '}'
"""
if y is not None and x is not None:
self.move(y, x)
next_attr_char = text.find(FORMAT_CHAR)
while next_attr_char != -1 and text:
if next_attr_char + 1 < len(text):
attr_char = text[next_attr_char+1].lower()
else:
attr_char = str()
if next_attr_char != 0:
self.addstr(text[:next_attr_char])
if attr_char == 'o':
self._win.attrset(0)
elif attr_char == 'u':
self._win.attron(curses.A_UNDERLINE)
elif attr_char == 'b':
self._win.attron(curses.A_BOLD)
if (attr_char in string.digits or attr_char == '-') and attr_char != '':
color_str = text[next_attr_char+1:text.find('}', next_attr_char)]
if ',' in color_str:
tup, char = read_tuple(color_str)
self._win.attron(to_curses_attr(tup))
if char:
if char == 'o':
self._win.attrset(0)
elif char == 'u':
self._win.attron(curses.A_UNDERLINE)
elif char == 'b':
self._win.attron(curses.A_BOLD)
else:
                        # this resets any previous bold/underline attributes that were set
self._win.attroff(curses.A_UNDERLINE)
self._win.attroff(curses.A_BOLD)
elif color_str:
self._win.attron(to_curses_attr((int(color_str), -1)))
text = text[next_attr_char+len(color_str)+2:]
else:
text = text[next_attr_char+2:]
next_attr_char = text.find(FORMAT_CHAR)
self.addstr(text)
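    # Usage sketch (hypothetical string, using the format codes described above):
    #     win.addstr_colored('\x19bbold\x19o then back to normal')
    # writes "bold" in bold, resets the attributes at '\x19o', then writes the rest.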
def finish_line(self, color=None):
"""
        Write colored spaces until the end of the line
"""
(y, x) = self._win.getyx()
size = self.width - x
if color:
self.addnstr(' '*size, size, to_curses_attr(color))
else:
self.addnstr(' '*size, size)
@property
def core(self):
if not Win._win_core:
Win._win_core = singleton.Singleton(core.Core)
return Win._win_core
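# A minimal sketch of how a concrete window is usually built on top of Win
# (hypothetical example, not part of the real window implementations):
class ExampleBannerWin(Win):
    """Display a single static line of text in its area."""
    def refresh(self, text=''):
        # erase the area, draw the (truncated) text, pad the rest of the line
        self._win.erase()
        self.addstr(0, 0, text[:self.width])
        self.finish_line()
        self._refresh()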
| python | 5,516 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "homepage.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| python | 251 |
#!/usr/bin/env python
from __future__ import print_function
from typing import List, Any, Sequence
import tensorflow as tf
import time
import os
import json
import numpy as np
import pickle
import random
from utils import MLP, ThreadedIterator
print(tf.__version__)
class ChemModel(object):
@classmethod
def default_params(cls):
return {
'num_epochs': 200,
'patience': 150,
'learning_rate': 0.002,
'clamp_gradient_norm': 1, # 1.0->0.8
'out_layer_dropout_keep_prob': 0.8, # 1.0->0.8
'hidden_size': 256, # 256/512/1024/2048
'num_timesteps': 4, # 4->6
'use_graph': True,
'tie_fwd_bkwd': False, # True->False
'task_ids': [0],
# 'random_seed': 6600,
# 'threshold': 0.5,
'train_file': 'train_data/reentrancy/train_corenodes.json',
'valid_file': 'train_data/reentrancy/valid_corenodes.json'
# 'train_file': 'train_data/infinite_loop/train_corenodes.json',
# 'valid_file': 'train_data/infinite_loop/valid_corenodes.json'
}
def __init__(self, args):
self.args = args
# Collect argument things:
data_dir = ''
if '--data_dir' in args and args['--data_dir'] is not None:
data_dir = args['--data_dir']
self.data_dir = data_dir
        random_seed = args.get('--random_seed')
        self.random_seed = int(random_seed) if random_seed is not None else 9930
        threshold = args.get('--thresholds')
        self.threshold = float(threshold) if threshold is not None else 0.352
self.run_id = "_".join([time.strftime("%Y-%m-%d-%H-%M-%S"), str(os.getpid())])
log_dir = args.get('--log_dir') or '.'
self.log_file = os.path.join(log_dir, "%s_log.json" % self.run_id)
self.best_model_file = os.path.join(log_dir, "%s_model_best.pickle" % self.run_id)
# Collect parameters:
params = self.default_params()
config_file = args.get('--config-file')
if config_file is not None:
with open(config_file, 'r') as f:
params.update(json.load(f))
config = args.get('--config')
if config is not None:
params.update(json.loads(config))
self.params = params
print("Run %s starting with following parameters:\n%s" % (self.run_id, json.dumps(self.params)))
random.seed(self.random_seed)
np.random.seed(self.random_seed)
print("Run with current seed %s " % self.random_seed)
        # Load data:
self.max_num_vertices = 0
self.num_edge_types = 0
self.annotation_size = 0
self.num_graph = 1
self.train_num_graph = 0
self.valid_num_graph = 0
self.train_data, self.train_num_graph = self.load_data(params['train_file'], is_training_data=True)
self.valid_data, self.valid_num_graph = self.load_data(params['valid_file'], is_training_data=False)
# Build the actual model
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.graph = tf.Graph()
self.sess = tf.Session(graph=self.graph, config=config)
with self.graph.as_default():
tf.set_random_seed(self.random_seed)
self.placeholders = {}
self.weights = {}
self.ops = {}
self.make_model()
self.make_train_step()
# Restore/initialize variables:
restore_file = args.get('--restore')
if restore_file is not None:
self.restore_model(restore_file)
else:
self.initialize_model()
def load_data(self, file_name, is_training_data: bool):
full_path = os.path.join(self.data_dir, file_name)
print("Loading baseline from %s" % full_path)
with open(full_path, 'r') as f:
data = json.load(f)
restrict = self.args.get("--restrict_data")
        if restrict is not None and int(restrict) > 0:
            data = data[:int(restrict)]
        # Get some common data out:
num_fwd_edge_types = 0
for g in data:
self.max_num_vertices = max(self.max_num_vertices, max([v for e in g['graph'] for v in [e[0], e[2]]]))
num_fwd_edge_types = max(num_fwd_edge_types, max([e[1] for e in g['graph']]))
self.num_edge_types = max(self.num_edge_types, num_fwd_edge_types * (1 if self.params['tie_fwd_bkwd'] else 2))
self.annotation_size = max(self.annotation_size, len(data[0]["node_features"][0]))
return self.process_raw_graphs(data, is_training_data)
@staticmethod
def graph_string_to_array(graph_string: str) -> List[List[int]]:
return [[int(v) for v in s.split(' ')]
for s in graph_string.split('\n')]
def process_raw_graphs(self, raw_data: Sequence[Any], is_training_data: bool) -> Any:
raise Exception("Models have to implement process_raw_graphs!")
def make_model(self):
self.placeholders['target_values'] = tf.placeholder(tf.float32, [len(self.params['task_ids']), None],
name='target_values')
self.placeholders['target_mask'] = tf.placeholder(tf.float32, [len(self.params['task_ids']), None],
name='target_mask')
self.placeholders['num_graphs'] = tf.placeholder(tf.int32, [], name='num_graphs')
self.placeholders['out_layer_dropout_keep_prob'] = tf.placeholder(tf.float32, [],
name='out_layer_dropout_keep_prob')
with tf.variable_scope("graph_model"):
self.prepare_specific_graph_model()
# This does the actual graph work:
if self.params['use_graph']:
self.ops['final_node_representations'] = self.compute_final_node_representations()
else:
self.ops['final_node_representations'] = tf.zeros_like(self.placeholders['process_raw_graphs'])
self.ops['losses'] = []
for (internal_id, task_id) in enumerate(self.params['task_ids']):
with tf.variable_scope("out_layer_task%i" % task_id):
with tf.variable_scope("regression_gate"):
self.weights['regression_gate_task%i' % task_id] = MLP(2 * self.params['hidden_size'], 1, [],
self.placeholders[
'out_layer_dropout_keep_prob'])
with tf.variable_scope("regression"):
self.weights['regression_transform_task%i' % task_id] = MLP(self.params['hidden_size'], 1, [],
self.placeholders[
'out_layer_dropout_keep_prob'])
computed_values, sigm_val = self.gated_regression(self.ops['final_node_representations'],
self.weights['regression_gate_task%i' % task_id],
self.weights['regression_transform_task%i' % task_id])
def f(x):
x = 1 * x
x = x.astype(np.float32)
return x
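                # f() converts the boolean array that comes back through
                # tf.py_func into float32 0/1 values so it can be compared
                # against the float labels.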
new_computed_values = tf.nn.sigmoid(computed_values)
new_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=computed_values,
labels=self.placeholders[
'target_values'][
internal_id, :]))
a = tf.math.greater_equal(new_computed_values, self.threshold)
a = tf.py_func(f, [a], tf.float32)
correct_pred = tf.equal(a, self.placeholders['target_values'][internal_id, :])
self.ops['new_computed_values'] = new_computed_values
self.ops['sigm_val'] = sigm_val
self.ops['accuracy_task%i' % task_id] = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
b = tf.multiply(self.placeholders['target_values'][internal_id, :], 2)
b = tf.py_func(f, [b], tf.float32)
c = tf.cast(a, tf.float32)
d = tf.math.add(b, c)
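                # d encodes 2 * label + prediction, so every example falls into
                # exactly one bucket: 3 = TP, 2 = FN, 1 = FP, 0 = TN.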
self.ops['sigm_c'] = correct_pred
d_TP = tf.math.equal(d, 3)
TP = tf.reduce_sum(tf.cast(d_TP, tf.float32)) # true positive
d_FN = tf.math.equal(d, 2)
FN = tf.reduce_sum(tf.cast(d_FN, tf.float32)) # false negative
d_FP = tf.math.equal(d, 1)
FP = tf.reduce_sum(tf.cast(d_FP, tf.float32)) # false positive
d_TN = tf.math.equal(d, 0)
TN = tf.reduce_sum(tf.cast(d_TN, tf.float32)) # true negative
self.ops['sigm_sum'] = tf.add_n([TP, FN, FP, TN])
self.ops['sigm_TP'] = TP
self.ops['sigm_FN'] = FN
self.ops['sigm_FP'] = FP
self.ops['sigm_TN'] = TN
R = tf.cast(tf.divide(TP, tf.add(TP, FN)), tf.float32) # Recall
P = tf.cast(tf.divide(TP, tf.add(TP, FP)), tf.float32) # Precision
FPR = tf.cast(tf.divide(FP, tf.add(TN, FP)), tf.float32) # FPR: false positive rate
D_TP = tf.add(TP, TP)
F1 = tf.cast(tf.divide(D_TP, tf.add_n([D_TP, FP, FN])), tf.float32) # F1
self.ops['sigm_Recall'] = R
self.ops['sigm_Precision'] = P
self.ops['sigm_F1'] = F1
self.ops['sigm_FPR'] = FPR
self.ops['losses'].append(new_loss)
self.ops['loss'] = tf.reduce_sum(self.ops['losses'])
def make_train_step(self):
trainable_vars = self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
if self.args.get('--freeze-graph-model'):
graph_vars = set(self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="graph_model"))
filtered_vars = []
for var in trainable_vars:
if var not in graph_vars:
filtered_vars.append(var)
else:
print("Freezing weights of variable %s." % var.name)
trainable_vars = filtered_vars
optimizer = tf.train.AdamOptimizer(self.params['learning_rate'])
grads_and_vars = optimizer.compute_gradients(self.ops['loss'], var_list=trainable_vars)
clipped_grads = []
for grad, var in grads_and_vars:
if grad is not None:
clipped_grads.append((tf.clip_by_norm(grad, self.params['clamp_gradient_norm']), var))
else:
clipped_grads.append((grad, var))
self.ops['train_step'] = optimizer.apply_gradients(clipped_grads)
# Initialize newly-introduced variables:
self.sess.run(tf.local_variables_initializer())
def gated_regression(self, last_h, regression_gate, regression_transform):
raise Exception("Models have to implement gated_regression!")
def prepare_specific_graph_model(self) -> None:
raise Exception("Models have to implement prepare_specific_graph_model!")
def compute_final_node_representations(self) -> tf.Tensor:
raise Exception("Models have to implement compute_final_node_representations!")
def make_minibatch_iterator(self, data: Any, is_training: bool):
raise Exception("Models have to implement make_minibatch_iterator!")
def run_epoch(self, epoch_name: str, data, is_training: bool):
chemical_accuracies = np.array([0.066513725, 0.012235489, 0.071939046, 0.033730778, 0.033486113, 0.004278493,
0.001330901, 0.004165489, 0.004128926, 0.00409976, 0.004527465, 0.012292586,
0.037467458])
loss = 0
accuracies = []
start_time = time.time()
processed_graphs = 0
accuracy_ops = [self.ops['accuracy_task%i' % task_id] for task_id in self.params['task_ids']]
batch_iterator = ThreadedIterator(self.make_minibatch_iterator(data, is_training), max_queue_size=5)
for step, batch_data in enumerate(batch_iterator):
num_graphs = batch_data[self.placeholders['num_graphs']]
processed_graphs += num_graphs
if is_training:
batch_data[self.placeholders['out_layer_dropout_keep_prob']] = self.params[
'out_layer_dropout_keep_prob']
fetch_list = [self.ops['loss'], accuracy_ops, self.ops['train_step']]
else:
batch_data[self.placeholders['out_layer_dropout_keep_prob']] = 1.0
fetch_list = [self.ops['loss'], accuracy_ops]
val_1, val_2, val_3, val_4, val_5, val_6 = self.sess.run(
[self.ops['sigm_c'], self.ops['sigm_TP'], self.ops['sigm_FN'], self.ops['sigm_FP'], self.ops['sigm_TN'],
self.ops['sigm_sum']], feed_dict=batch_data)
val_R, val_P, val_F1, val_FPR = self.sess.run(
[self.ops['sigm_Recall'], self.ops['sigm_Precision'], self.ops['sigm_F1'], self.ops['sigm_FPR']],
feed_dict=batch_data)
result = self.sess.run(fetch_list, feed_dict=batch_data)
(batch_loss, batch_accuracies) = (result[0], result[1])
loss += batch_loss * num_graphs
accuracies.append(np.array(batch_accuracies) * num_graphs)
print("random seed: {}".format(self.random_seed))
print("sum: {}".format(val_6))
print("TP: {}".format(val_2))
print("FN: {}".format(val_3))
print("FP: {}".format(val_4))
print("TN: {}".format(val_5))
print("Recall: {}".format(val_R))
print("Precision: {}".format(val_P))
print("F1: {}".format(val_F1))
print("FPR: {}".format(val_FPR))
print("Running %s, batch %i (has %i graphs). "
"Loss so far: %.4f" % (epoch_name, step, num_graphs, loss / processed_graphs), end='\r')
accuracies = np.sum(accuracies, axis=0) / processed_graphs
loss = loss / processed_graphs
error_ratios = accuracies / chemical_accuracies[self.params["task_ids"]]
instance_per_sec = processed_graphs / (time.time() - start_time)
return loss, accuracies, error_ratios, instance_per_sec
def train(self):
val_acc1 = []
log_to_save = []
total_time_start = time.time()
with self.graph.as_default():
if self.args.get('--restore') is not None:
_, valid_accs, _, _ = self.run_epoch("Resumed (validation)", self.valid_data, False)
best_val_acc = np.sum(valid_accs)
best_val_acc_epoch = 0
print("\r\x1b[KResumed operation, initial cum. val. acc: %.5f" % best_val_acc)
else:
(best_val_acc, best_val_acc_epoch) = (float("+inf"), 0)
for epoch in range(1, self.params['num_epochs'] + 1):
print("== Epoch %i" % epoch)
train_start = time.time()
self.num_graph = self.train_num_graph
train_loss, train_accs, train_errs, train_speed = self.run_epoch("epoch %i (training)" % epoch,
self.train_data, True)
accs_str = " ".join(["%i:%.5f" % (id, acc) for (id, acc) in zip(self.params['task_ids'], train_accs)])
errs_str = " ".join(["%i:%.5f" % (id, err) for (id, err) in zip(self.params['task_ids'], train_errs)])
print("\r\x1b[K Train: loss: %.5f | acc: %s | error_ratio: %s | instances/sec: %.2f" % (train_loss,
accs_str,
errs_str,
train_speed))
epoch_time_train = time.time() - train_start
print(epoch_time_train)
valid_start = time.time()
self.num_graph = self.valid_num_graph
valid_loss, valid_accs, valid_errs, valid_speed = self.run_epoch("epoch %i (validation)" % epoch,
self.valid_data, False)
accs_str = " ".join(["%i:%.5f" % (id, acc) for (id, acc) in zip(self.params['task_ids'], valid_accs)])
errs_str = " ".join(["%i:%.5f" % (id, err) for (id, err) in zip(self.params['task_ids'], valid_errs)])
print("\r\x1b[K Valid: loss: %.5f | acc: %s | error_ratio: %s | instances/sec: %.2f" % (valid_loss,
accs_str,
errs_str,
valid_speed))
epoch_time_valid = time.time() - valid_start
print(epoch_time_valid)
val_acc1.append(valid_accs)
epoch_time_total = time.time() - total_time_start
print(epoch_time_total)
log_entry = {
'epoch': epoch,
'time': epoch_time_total,
'train_results': (train_loss, train_accs.tolist(), train_errs.tolist(), train_speed),
'valid_results': (valid_loss, valid_accs.tolist(), valid_errs.tolist(), valid_speed),
}
log_to_save.append(log_entry)
val_acc = np.sum(valid_accs) # type: float
if val_acc < best_val_acc:
print(" (Best epoch so far, cum. val. acc decreased to %.5f from %.5f. Saving to '%s')" % (
val_acc, best_val_acc, self.best_model_file))
best_val_acc = val_acc
best_val_acc_epoch = epoch
elif epoch - best_val_acc_epoch >= self.params['patience']:
print("Stopping training after %i epochs without improvement on validation accuracy." % self.params[
'patience'])
break
print(max(val_acc1))
def save_model(self, path: str) -> None:
weights_to_save = {}
for variable in self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
assert variable.name not in weights_to_save
weights_to_save[variable.name] = self.sess.run(variable)
data_to_save = {
"params": self.params,
"weights": weights_to_save
}
with open(path, 'wb') as out_file:
pickle.dump(data_to_save, out_file, pickle.HIGHEST_PROTOCOL)
def initialize_model(self) -> None:
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
self.sess.run(init_op)
def restore_model(self, path: str) -> None:
print("Restoring weights from file %s." % path)
with open(path, 'rb') as in_file:
data_to_load = pickle.load(in_file)
# Assert that we got the same model configuration
assert len(self.params) == len(data_to_load['params'])
for (par, par_value) in self.params.items():
# Fine to have different task_ids:
if par not in ['task_ids', 'num_epochs']:
assert par_value == data_to_load['params'][par]
variables_to_initialize = []
with tf.name_scope("restore"):
restore_ops = []
used_vars = set()
for variable in self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
used_vars.add(variable.name)
if variable.name in data_to_load['weights']:
restore_ops.append(variable.assign(data_to_load['weights'][variable.name]))
else:
print('Freshly initializing %s since no saved value was found.' % variable.name)
variables_to_initialize.append(variable)
for var_name in data_to_load['weights']:
if var_name not in used_vars:
print('Saved weights for %s not used by model.' % var_name)
restore_ops.append(tf.variables_initializer(variables_to_initialize))
self.sess.run(restore_ops)
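# Sketch of the subclass contract (hypothetical class name): a concrete model only
# has to provide the graph-specific pieces; the training loop, metrics and
# checkpointing above are inherited from ChemModel.
#
#     class SparseGGNNChemModel(ChemModel):
#         def prepare_specific_graph_model(self) -> None: ...
#         def compute_final_node_representations(self) -> tf.Tensor: ...
#         def process_raw_graphs(self, raw_data, is_training_data): ...
#         def make_minibatch_iterator(self, data, is_training): ...
#         def gated_regression(self, last_h, regression_gate, regression_transform): ...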
| python | 21,122 |
from decimal import Decimal as D
from collections import defaultdict, OrderedDict
from dateutil.relativedelta import relativedelta
from accounting.apps.books.models import Invoice, Bill
from accounting.apps.books.calculators import ProfitsLossCalculator
from accounting.libs.intervals import TimeInterval
class BaseReport(object):
title = None
period = None
def __init__(self, title, start, end):
self.title = title
self.period = TimeInterval(start, end)
def generate(self):
raise NotImplementedError
class TaxRateSummary(object):
tax_rate = None
taxable_amount = D('0')
expenses_amount = D('0')
@property
def collected_taxes(self):
return self.tax_rate.rate * self.taxable_amount
@property
def deductible_taxes(self):
return self.tax_rate.rate * self.expenses_amount
@property
def net_amount(self):
return self.taxable_amount - self.expenses_amount
@property
def net_taxes(self):
return self.tax_rate.rate * self.net_amount
class TaxReport(BaseReport):
# TODO implement 'Billed (Accrual) / Collected (Cash based)'
organization = None
tax_summaries = None
def __init__(self, organization, start, end):
super().__init__("Tax Report", start, end)
self.organization = organization
self.tax_summaries = defaultdict(TaxRateSummary)
def generate(self):
invoice_queryset = Invoice.objects.all()
bill_queryset = Bill.objects.all()
self.generate_for_sales(invoice_queryset)
self.generate_for_sales(bill_queryset)
def generate_for_sales(self, sales_queryset):
calculator = ProfitsLossCalculator(self.organization,
start=self.period.start,
end=self.period.end)
for output in calculator.process_generator(sales_queryset):
summary = self.tax_summaries[output.tax_rate.pk]
summary.tax_rate = output.tax_rate
if isinstance(output.sale, Invoice):
summary.taxable_amount += output.amount_excl_tax
elif isinstance(output.sale, Bill):
summary.expenses_amount += output.amount_excl_tax
else:
raise ValueError("Unsupported type of sale {}"
.format(output.sale.__class__))
class ProfitAndLossSummary(object):
grouping_date = None
sales_amount = D('0')
expenses_amount = D('0')
@property
def net_profit(self):
return self.sales_amount - self.expenses_amount
class ProfitAndLossReport(BaseReport):
# TODO implement 'Billed (Accrual) / Collected (Cash based)'
organization = None
summaries = None
total_summary = None
RESOLUTION_MONTHLY = 'monthly'
RESOLUTION_CHOICES = (
RESOLUTION_MONTHLY,
)
group_by_resolution = RESOLUTION_MONTHLY
def __init__(self, organization, start, end):
super().__init__("Profit and Loss", start, end)
self.organization = organization
self.summaries = {}
steps_interval = relativedelta(end, start)
assert self.group_by_resolution in self.RESOLUTION_CHOICES, \
"No a resolution choice"
if self.group_by_resolution == self.RESOLUTION_MONTHLY:
for step in range(0, steps_interval.months):
key_date = start + relativedelta(months=step)
self.summaries[key_date] = ProfitAndLossSummary()
else:
raise ValueError("Unsupported resolution {}"
.format(self.group_by_resolution))
self.total_summary = ProfitAndLossSummary()
def group_by_date(self, date):
if self.group_by_resolution == self.RESOLUTION_MONTHLY:
grouping_date = date.replace(day=1)
else:
raise ValueError("Unsupported resolution {}"
.format(self.group_by_resolution))
return grouping_date
def generate(self):
invoice_queryset = Invoice.objects.all()
bill_queryset = Bill.objects.all()
self.generate_for_sales(invoice_queryset)
self.generate_for_sales(bill_queryset)
# order the results
self.summaries = OrderedDict(sorted(self.summaries.items()))
# compute totals
for summary in self.summaries.values():
self.total_summary.sales_amount += summary.sales_amount
self.total_summary.expenses_amount += summary.expenses_amount
def generate_for_sales(self, sales_queryset):
calculator = ProfitsLossCalculator(self.organization,
start=self.period.start,
end=self.period.end)
for output in calculator.process_generator(sales_queryset):
key_date = self.group_by_date(output.payment.date_paid)
summary = self.summaries[key_date]
if isinstance(output.sale, Invoice):
summary.sales_amount += output.amount_excl_tax
elif isinstance(output.sale, Bill):
summary.expenses_amount += output.amount_excl_tax
else:
raise ValueError("Unsupported type of sale {}"
.format(output.sale.__class__))
class PayRunSummary(object):
payroll_tax_rate = None
total_excl_tax = D('0')
@property
def payroll_taxes(self):
return self.payroll_tax_rate * self.total_excl_tax
class PayRunReport(BaseReport):
organization = None
summaries = None
total_payroll_taxes = D('0')
def __init__(self, organization, start, end):
super().__init__("Pay Run Report", start, end)
self.organization = organization
self.summaries = defaultdict(PayRunSummary)
def generate(self):
employee_queryset = self.organization.employees.all()
self.generate_for_employees(employee_queryset)
def generate_for_employees(self, employee_queryset):
total_payroll_taxes = D('0')
calculator = ProfitsLossCalculator(self.organization,
start=self.period.start,
end=self.period.end)
for emp in employee_queryset:
summary = self.summaries[emp.composite_name]
summary.employee = emp
summary.payroll_tax_rate = emp.payroll_tax_rate
if emp.salary_follows_profits:
# TODO compute profits based on the period interval
profits = calculator.profits()
summary.total_excl_tax = profits * emp.shares_percentage
else:
raise ValueError("Salary not indexed on the profits "
"are not supported yet")
total_payroll_taxes += summary.payroll_taxes
# Total payroll
self.total_payroll_taxes = total_payroll_taxes
class InvoiceDetailsReport(BaseReport):
organization = None
invoices = None
tax_rates = None
def __init__(self, organization, start, end):
super().__init__("Pay Run Report", start, end)
self.organization = organization
self.tax_rates = organization.tax_rates.all()
def generate(self):
invoice_queryset = self.organization.invoices.all()
self.generate_for_invoices(invoice_queryset)
def generate_for_invoices(self, invoice_queryset):
invoice_queryset = (invoice_queryset
.filter(payments__date_paid__range=[
self.period.start,
self.period.end
]))
# optimize the query
invoice_queryset = (invoice_queryset
.select_related(
'organization')
.prefetch_related(
'lines',
'lines__tax_rate',
'payments',
'organization__employees',)
.distinct())
self.invoices = invoice_queryset
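# Usage sketch (hypothetical organization and dates; assumes the books app is
# populated with invoices/bills and their payments):
#
#     from datetime import date
#     report = ProfitAndLossReport(organization, date(2021, 1, 1), date(2021, 12, 31))
#     report.generate()
#     for month, summary in report.summaries.items():
#         print(month, summary.net_profit)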
| python | 7,987 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torchvision
from fcos_core.structures.bounding_box import BoxList
from fcos_core.structures.segmentation_mask import SegmentationMask
from fcos_core.structures.keypoint import PersonKeypoints
min_keypoints_per_image = 10
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def has_valid_annotation(anno):
# if it's empty, there is no annotation
if len(anno) == 0:
return False
# if all boxes have close to zero area, there is no annotation
if _has_only_empty_bbox(anno):
return False
    # the keypoints task has a slightly different criterion for considering
    # whether an annotation is valid
if "keypoints" not in anno[0]:
return True
# for keypoint detection tasks, only consider valid images those
# containing at least min_keypoints_per_image
if _count_visible_keypoints(anno) >= min_keypoints_per_image:
return True
return False
class COCODataset(torchvision.datasets.coco.CocoDetection):
def __init__(
self, ann_file, root, remove_images_without_annotations, transforms=None
):
super(COCODataset, self).__init__(root, ann_file)
# sort indices for reproducible results
self.ids = sorted(self.ids)
# filter images without detection annotations
if remove_images_without_annotations:
ids = []
for img_id in self.ids:
ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
anno = self.coco.loadAnns(ann_ids)
if has_valid_annotation(anno):
ids.append(img_id)
self.ids = ids
self.json_category_id_to_contiguous_id = {
v: i + 1 for i, v in enumerate(self.coco.getCatIds())
}
self.contiguous_category_id_to_json_id = {
v: k for k, v in self.json_category_id_to_contiguous_id.items()
}
self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
self._transforms = transforms
def __getitem__(self, idx):
img, anno = super(COCODataset, self).__getitem__(idx)
# filter crowd annotations
# TODO might be better to add an extra field
anno = [obj for obj in anno if obj["iscrowd"] == 0]
boxes = [obj["bbox"] for obj in anno]
boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes
target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
classes = [obj["category_id"] for obj in anno]
classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
classes = torch.tensor(classes)
target.add_field("labels", classes)
masks = [obj["segmentation"] for obj in anno]
# masks = SegmentationMask(masks, img.size, mode='poly')
# target.add_field("masks", masks)
# print("boxes")
# print(boxes)
target.add_field("masks", boxes)
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = PersonKeypoints(keypoints, img.size)
target.add_field("keypoints", keypoints)
target = target.clip_to_image(remove_empty=True)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target, idx
def get_img_info(self, index):
img_id = self.id_to_img_map[index]
img_data = self.coco.imgs[img_id]
return img_data
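# Usage sketch (hypothetical paths; assumes a standard COCO-style layout):
#
#     dataset = COCODataset("annotations/instances_train2017.json", "train2017",
#                           remove_images_without_annotations=True)
#     img, target, idx = dataset[0]
#     print(target.bbox.shape, target.get_field("labels"))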
| python | 3,703 |
#!/usr/bin/env python
"""Fabfile using only commands from buedafab (https://github.com/bueda/ops) to
deploy this app to remote servers.
"""
import os
from fabric.api import *
from buedafab.test import (test, tornado_test_runner as _tornado_test_runner,
lint)
from buedafab.deploy.types import tornado_deploy as deploy
from buedafab.environments import development, staging, production, localhost
from buedafab.tasks import (setup, restart_webserver, rollback, enable,
disable, maintenancemode, rechef)
# For a description of these attributes, see https://github.com/bueda/ops
env.unit = "rishacar"
env.path = "/var/webapps/%(unit)s" % env
env.scm = "[email protected]:bueda/%(unit)s.git" % env
env.scm_http_url = "http://github.com/bueda/%(unit)s" % env
env.root_dir = os.path.abspath(os.path.dirname(__file__))
env.test_runner = _tornado_test_runner
env.pip_requirements = ["requirements/common.txt",
"vendor/allo/pip-requirements.txt",]
env.pip_requirements_dev = ["requirements/dev.txt",]
env.pip_requirements_production = ["requirements/production.txt",]
| python | 1,085 |
from typing import Optional
from application.models import IviApiResponseResult, IviApiResponse
from application.service.elastic import ElasticWizard
from application.service.http import HTTPClient, get_external_api_headers
from application.settings import Settings
VID_TYPE_MAP = {"film": "Фильм", "serial": "Сериал"}
class IviOrderManager:
api_limit = Settings.ivi.LIMIT
@classmethod
async def run(cls, message: str):
api_link = Settings.ivi.IVI_API_LINK.format(name=message, limit=cls.api_limit)
objects = await cls.send_request(api_link)
already_have_titles = await cls.check_if_content_already_in_db(objects)
titles = []
for result in objects:
await ElasticWizard.store_object(result)
titles.append(result.title)
return {"new": titles, "old": already_have_titles}
@classmethod
async def send_request(cls, api_link: str) -> Optional[list[IviApiResponseResult]]:
ivi_response = await HTTPClient.get(api_link, headers=get_external_api_headers())
return IviApiResponse(**ivi_response).result
@classmethod
async def check_if_content_already_in_db(cls, objects: list[IviApiResponseResult]) -> list[str]:
result = await ElasticWizard.check_objects(objects)
already_have_ids = {int(item["_id"]) for item in result["hits"]["hits"]}
already_have_titles = []
        # iterate over a copy so removing items from `objects` does not skip entries
        for obj in list(objects):
if obj.id in already_have_ids:
already_have_titles.append(f"{obj.title} ({obj.year_of_content})")
objects.remove(obj)
return already_have_titles
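# Usage sketch (hypothetical search string, inside an async context):
#
#     result = await IviOrderManager.run("Dune")
#     # result == {"new": [<titles just stored>], "old": [<titles already indexed>]}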
| python | 1,625 |
# Generated by Django 3.2.4 on 2021-08-04 18:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recibidos', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comprobantesrecibidos',
name='Tipo',
field=models.CharField(max_length=60),
),
]
| python | 390 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
try:
import astropy_helpers
except ImportError:
# Building from inside the docs/ directory?
if os.path.basename(os.getcwd()) == 'docs':
a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
if os.path.isdir(a_h_path):
sys.path.insert(1, a_h_path)
# Load all of the global Astropy configuration
from astropy_helpers.sphinx.conf import *
# Get configuration information from setup.cfg
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
setup_cfg = {str(k): str(v) for k, v in setup_cfg.items()}  # make sure the parsed values are str, not unicode
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
nbsphinx_prolog = """
The notebook is available here:
https://github.com/starkit/wsynphot/tree/master/docs/{{ env.doc2path(env.docname, base=None) }}
----
"""
nbsphinx_execute = 'never'
exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
datetime.datetime.now().year, setup_cfg['author'])
html_theme_options = {
'logotext1': 'W', # white, semi-bold
'logotext2': 'Synphot', # orange, light
'logotext3': ':documentation'}
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
__import__(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
extensions += [
'nbsphinx',
]
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
## -- Options for the edit_on_github extension ----------------------------------------
if setup_cfg.get('edit_on_github', 'False').lower() == 'true':
extensions += ['astropy_helpers.sphinx.ext.edit_on_github']
versionmod = __import__(setup_cfg['package_name'] + '.version')
edit_on_github_project = setup_cfg['github_project']
if versionmod.version.release:
edit_on_github_branch = "v" + versionmod.version.version
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
| python | 6,812 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import googletest
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
inp = np.random.rand(*shape).astype(np.float32)
# Convert to HSV and back, as a batch and individually
with self.test_session() as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unpack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.pack(split1)
join2 = array_ops.pack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
rgb_np = np.array(data, dtype=np.float32).reshape([2, 2, 3]) / 255.
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = rgb.eval()
self.assertAllClose(rgb_tf, rgb_np)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in xrange(images.shape[0]):
for y in xrange(images.shape[1]):
for x in xrange(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
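          # (0.2989, 0.5870, 0.1140) are the standard ITU-R BT.601 luma weights.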
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]],
dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]],
dtype=np.uint8).reshape([1, 1, 2, 3])
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
class AdjustHueTest(test_util.TensorFlowTestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
class FlipTest(test_util.TensorFlowTestCase):
def testIdempotentLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testIdempotentUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testIdempotentTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(image_ops.transpose_image(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
class RandomFlipTest(test_util.TensorFlowTestCase):
def testRandomLeftRight(self):
x_np = np.array([0, 1], dtype=np.uint8).reshape([1, 2, 1])
num_iterations = 500
hist = [0, 0]
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf)
for _ in xrange(num_iterations):
y_np = y.eval().flatten()[0]
hist[y_np] += 1
# Ensure that each entry is observed within 4 standard deviations.
four_stddev = 4.0 * np.sqrt(num_iterations / 2.0)
self.assertAllClose(hist, [num_iterations / 2.0] * 2, atol=four_stddev)
def testRandomUpDown(self):
x_np = np.array([0, 1], dtype=np.uint8).reshape([2, 1, 1])
num_iterations = 500
hist = [0, 0]
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf)
for _ in xrange(num_iterations):
y_np = y.eval().flatten()[0]
hist[y_np] += 1
# Ensure that each entry is observed within 4 standard deviations.
four_stddev = 4.0 * np.sqrt(num_iterations / 2.0)
self.assertAllClose(hist, [num_iterations / 2.0] * 2, atol=four_stddev)
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor, min_value, max_value):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x,
contrast_factor,
min_value=min_value,
max_value=max_value)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 63, 169, 255, 29, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np,
y_np,
contrast_factor=2.0,
min_value=None,
max_value=None)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float).reshape(x_shape)
y_data = [0, 0, 0, 62.75, 169.25, 255, 28.75, 0, 255, 134.75, 255, 0]
y_np = np.array(y_data, dtype=np.float).reshape(x_shape)
self._testContrast(x_np,
y_np,
contrast_factor=2.0,
min_value=0,
max_value=255)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [23, 53, 66, 50, 118, 172, 41, 54, 176, 68, 178, 60]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np,
y_np,
contrast_factor=0.5,
min_value=None,
max_value=None)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 11, 0, 255, 117, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np,
y_np,
contrast_factor=2.0,
min_value=None,
max_value=None)
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta, min_value, max_value):
with self.test_session():
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x,
delta,
min_value=min_value,
max_value=max_value)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10.0, min_value=None, max_value=None)
def testPositiveDeltaFloat(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10.0, min_value=None, max_value=None)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [5, 5, 5, 44, 125, 216, 27, 5, 224, 80, 245, 5]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10.0, min_value=5, max_value=None)
class RandomCropTest(test_util.TensorFlowTestCase):
def testNoOp(self):
# No random cropping is performed since the target width and height
    # match the image dimensions.
height = 4
width = 5
x_shape = [height, width, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=np.int32).reshape(x_shape)
target_shape_np = np.array([height, width], dtype=np.int64)
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
target_shape = constant_op.constant(target_shape_np, shape=[2])
y = image_ops.random_crop(x, target_shape)
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testRandomization(self):
# Run 1x1 crop num_samples times in an image and ensure that one finds each
# pixel 1/num_pixels of the time.
num_samples = 1000
height = 5
width = 4
num_pixels = height * width
data = np.arange(num_pixels).reshape([height, width, 1])
x_np = np.array(data).astype(np.int32)
target_shape_np = np.array([1, 1], dtype=np.int64)
y = []
with self.test_session():
x = constant_op.constant(x_np, shape=x_np.shape)
target_shape = constant_op.constant(target_shape_np, shape=[2])
y_tf = image_ops.random_crop(x, target_shape)
for _ in xrange(num_samples):
y_np = y_tf.eval()
self.assertAllEqual(y_np.shape, [1, 1, 1])
y.extend(y_np.flatten())
# Calculate the mean and 4 * standard deviation.
mean = [num_samples / num_pixels] * num_pixels
four_stddev = 4.0 * np.sqrt(mean)
# Ensure that each entry is observed in 1/num_pixels of the samples
# within 4 standard deviations.
counts = np.bincount(y)
self.assertAllClose(counts, mean, atol=four_stddev)
class PerImageWhiteningTest(test_util.TensorFlowTestCase):
def _NumpyPerImageWhitening(self, x):
num_pixels = np.prod(x.shape)
x2 = np.square(x).astype(np.float32)
mn = np.mean(x)
vr = np.mean(x2) - (mn * mn)
stddev = max(math.sqrt(vr), 1.0 / math.sqrt(num_pixels))
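    # The 1.0/sqrt(num_pixels) floor keeps the divisor away from zero for
    # (near-)uniform images; see testUniformImage below.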
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
def testBasic(self):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=np.int32).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.per_image_whitening(x)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, atol=1e-4)
def testUniformImage(self):
im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
im = constant_op.constant(im_np)
whiten = image_ops.per_image_whitening(im)
with self.test_session():
whiten_np = whiten.eval()
self.assertFalse(np.any(np.isnan(whiten_np)))
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def testNoOp(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
target_height = x_shape[0]
target_width = x_shape[1]
y = image_ops.crop_to_bounding_box(x, 0, 0, target_height, target_width)
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testCropping(self):
x_np = np.arange(0, 30, dtype=np.int32).reshape([6, 5, 1])
offset_height = 1
after_height = 2
offset_width = 0
after_width = 3
target_height = x_np.shape[0] - offset_height - after_height
target_width = x_np.shape[1] - offset_width - after_width
y_np = x_np[offset_height:offset_height + target_height,
offset_width:offset_width + target_width, :]
with self.test_session():
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.crop_to_bounding_box(x, offset_height, offset_width,
target_height, target_width)
y_tf = y.eval()
self.assertAllEqual(y_tf.flatten(), y_np.flatten())
class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
def testNoOp(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
target_height = x_shape[0]
target_width = x_shape[1]
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.pad_to_bounding_box(x, 0, 0, target_height, target_width)
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testPadding(self):
x_shape = [3, 4, 1]
x_np = np.ones(x_shape, dtype=np.float32)
offset_height = 2
after_height = 3
offset_width = 1
after_width = 4
target_height = x_shape[0] + offset_height + after_height
target_width = x_shape[1] + offset_width + after_width
    # Note: the paddings are along the height, width and depth dimensions.
paddings = ((offset_height, after_height),
(offset_width, after_width),
(0, 0))
y_np = np.pad(x_np, paddings, 'constant')
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.pad_to_bounding_box(x, offset_height, offset_width,
target_height, target_width)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
class ResizeImagesTest(test_util.TensorFlowTestCase):
OPTIONS = [image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC,
image_ops.ResizeMethod.AREA]
def testNoOp(self):
img_shape = [1, 6, 4, 1]
data = [128, 128, 64, 64,
128, 128, 64, 64,
64, 64, 128, 128,
64, 64, 128, 128,
50, 50, 100, 100,
50, 50, 100, 100]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 6
target_width = 4
for opt in self.OPTIONS:
with self.test_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, target_height, target_width, opt)
resized = y.eval()
self.assertAllClose(resized, img_np, atol=1e-5)
def testResizeDown(self):
data = [128, 128, 64, 64,
128, 128, 64, 64,
64, 64, 128, 128,
64, 64, 128, 128,
50, 50, 100, 100,
50, 50, 100, 100]
expected_data = [128, 64,
64, 128,
50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, target_height, target_width, opt)
expected = np.array(expected_data).reshape(target_shape)
resized = y.eval()
self.assertAllClose(resized, expected, atol=1e-5)
def testResizeUp(self):
img_shape = [1, 3, 2, 1]
data = [128, 64,
64, 128,
50, 100]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
128.0, 96.0, 64.0, 64.0,
96.0, 96.0, 96.0, 96.0,
64.0, 96.0, 128.0, 128.0,
57.0, 85.5, 114.0, 114.0,
50.0, 75.0, 100.0, 100.0,
50.0, 75.0, 100.0, 100.0]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
128.0, 128.0, 64.0, 64.0,
128.0, 128.0, 64.0, 64.0,
64.0, 64.0, 128.0, 128.0,
64.0, 64.0, 128.0, 128.0,
50.0, 50.0, 100.0, 100.0,
50.0, 50.0, 100.0, 100.0]
expected_data[image_ops.ResizeMethod.AREA] = [
128.0, 128.0, 64.0, 64.0,
128.0, 128.0, 64.0, 64.0,
64.0, 64.0, 128.0, 128.0,
64.0, 64.0, 128.0, 128.0,
50.0, 50.0, 100.0, 100.0,
50.0, 50.0, 100.0, 100.0]
for opt in [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.AREA]:
with self.test_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, target_height, target_width, opt)
resized = y.eval()
expected = np.array(expected_data[opt]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [128, 128, 64, 64, 128, 128, 64, 64,
64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [128, 135, 96, 55, 64, 114, 134, 128,
78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84,
74, 70, 95, 122, 115, 69, 49, 55,
100, 105, 75, 43, 50, 89, 105, 100,
57, 54, 74, 96, 91, 65, 55, 58,
70, 69, 75, 81, 80, 72, 69, 70,
105, 112, 75, 36, 45, 92, 111, 105]
with self.test_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, target_height, target_width,
image_ops.ResizeMethod.BICUBIC)
resized = y.eval()
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [128, 64, 32, 16, 8, 4,
4, 8, 16, 32, 64, 128,
128, 64, 32, 16, 8, 4,
5, 10, 15, 20, 25, 30,
30, 25, 20, 15, 10, 5,
5, 10, 15, 20, 25, 30]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [73, 33, 23, 39,
73, 33, 23, 39,
14, 16, 19, 21,
14, 16, 19, 21]
with self.test_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, target_height, target_width,
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = y.eval()
self.assertAllClose(resized, expected, atol=1)
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, original, original_shape,
expected, expected_shape):
x_np = np.array(original, dtype=np.uint8).reshape(original_shape)
y_np = np.array(expected).reshape(expected_shape)
target_height = expected_shape[0]
target_width = expected_shape[1]
with self.test_session():
image = constant_op.constant(x_np, shape=original_shape)
y = image_ops.resize_image_with_crop_or_pad(image,
target_height,
target_width)
resized = y.eval()
self.assertAllClose(resized, y_np, atol=1e-5)
def testBasic(self):
# Basic no-op.
original = [1, 2, 3, 4,
5, 6, 7, 8]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
original, [2, 4, 1])
def testPad(self):
# Pad even along col.
original = [1, 2, 3, 4, 5, 6, 7, 8]
expected = [0, 1, 2, 3, 4, 0,
0, 5, 6, 7, 8, 0]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
expected, [2, 6, 1])
# Pad odd along col.
original = [1, 2, 3, 4,
5, 6, 7, 8]
expected = [0, 1, 2, 3, 4, 0, 0,
0, 5, 6, 7, 8, 0, 0]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
expected, [2, 7, 1])
# Pad even along row.
original = [1, 2, 3, 4,
5, 6, 7, 8]
expected = [0, 0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
0, 0, 0, 0]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
expected, [4, 4, 1])
# Pad odd along row.
original = [1, 2, 3, 4,
5, 6, 7, 8]
expected = [0, 0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
0, 0, 0, 0,
0, 0, 0, 0]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
expected, [5, 4, 1])
def testCrop(self):
# Crop even along col.
original = [1, 2, 3, 4,
5, 6, 7, 8]
expected = [2, 3,
6, 7]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
expected, [2, 2, 1])
# Crop odd along col.
original = [1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12]
expected = [2, 3, 4,
8, 9, 10]
self._ResizeImageWithCropOrPad(original, [2, 6, 1],
expected, [2, 3, 1])
# Crop even along row.
original = [1, 2,
3, 4,
5, 6,
7, 8]
expected = [3, 4,
5, 6]
self._ResizeImageWithCropOrPad(original, [4, 2, 1],
expected, [2, 2, 1])
# Crop odd along row.
original = [1, 2,
3, 4,
5, 6,
7, 8,
9, 10,
11, 12,
13, 14,
15, 16]
expected = [3, 4,
5, 6,
7, 8,
9, 10,
11, 12]
self._ResizeImageWithCropOrPad(original, [8, 2, 1],
expected, [5, 2, 1])
def testCropAndPad(self):
# Pad along row but crop along col.
original = [1, 2, 3, 4,
5, 6, 7, 8]
expected = [0, 0,
2, 3,
6, 7,
0, 0]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
expected, [4, 2, 1])
# Crop along row but pad along col.
original = [1, 2,
3, 4,
5, 6,
7, 8]
expected = [0, 3, 4, 0,
0, 5, 6, 0]
self._ResizeImageWithCropOrPad(original, [4, 2, 1],
expected, [2, 4, 1])
def _SimpleColorRamp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ('tensorflow/core/lib/jpeg/testdata/'
'jpeg_merge_test1.jpg')
with self.test_session() as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = sess.run([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 0.8)
def testSynthetic(self):
with self.test_session() as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0)
image2 = image_ops.decode_jpeg(image_ops.encode_jpeg(image1))
jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testShape(self):
with self.test_session() as sess:
jpeg = constant_op.constant('nonsense')
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = 'tensorflow/core/lib/png/testdata/'
inputs = (1, 'lena_gray.png'), (4, 'lena_rgba.png')
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.test_session() as sess:
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = sess.run([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, image1.eval())
def testSynthetic(self):
with self.test_session() as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = sess.run([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testShape(self):
with self.test_session() as sess:
png = constant_op.constant('nonsense')
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class ConvertImageTest(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.test_session():
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y.eval(), y_np, atol=1e-5)
def testNoConvert(self):
# Make sure converting to the same data type creates no ops
with self.test_session():
image = constant_op.constant([1], dtype=dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
self.assertEquals(image, y)
def testConvertBetweenInteger(self):
    # Make sure converting between integer types scales appropriately
with self.test_session():
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
def testConvertBetweenFloat(self):
    # Make sure converting between float types does nothing interesting
with self.test_session():
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
with self.test_session():
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
if __name__ == '__main__':
googletest.main()
| python | 35,478 |
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%% FUNCTION %%%%%%%%%%%%%%%%%%%
#
# user = username SPENVIS external INPUT %%%%
# password = password SPENVIS external INPUT %%%%
# proj = project name SPENVIS external INPUT %%%%
# lifetime = mission lifetime [number of years] external INPUT %%%%
# day = day starting the mission external INPUT %%%%
# month = month starting of the mission external INPUT %%%%
# h = altitude circular orbit external INPUT %%%%
# i = inclination circular orbit external INPUT %%%%
# Al_eq = equivalent Al shielding external INPUT %%%%
# n_devices = number of new devices external INPUT %%%%
# data_devices = data of the devices external INPUT %%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%% DESCRIPTION %%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# ATTENTION!! The project name passed to this function must match an
# existing project created by the user in SPENVIS
#
# user = username SPENVIS external INPUT %%%%%%%%%%%%%%%%%
# password = password SPENVIS external INPUT %%%%%%%%%%%%%%%%%
# proj = project name SPENVIS external INPUT %%%%%%%%%%%%%%%%%
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%% orbit generator %%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%% internal INPUT 1 %%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Planet --> Earth
# Trajectory Generation --> use orbit generator
# Number of mission segments: --> 1
# Mission End --> total mission duration ---> number of years -->
#
# lifetime = number of years external INPUT %%%%%%%%%%%%%%%%%
#
# Satellite orientation: --> one axis parallel to the velocity vector
# Account for solar radiation pressure: --> no
# Account for atmospheric drag: --> no
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%% internal INPUT 2 %%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Segment title --> (no title)
# Orbit type --> general
# orbit start -->
#
# calendar date --> [dd/mm/yyyy] external INPUT %%%%%%%%%%%%%%%
#
# Hour --> [00:00:00]
# Representative --> trajectory duration --> 1
# Altitude specification --> altitude for a circular orbit
#
# Altitude [km] --> external INPUT %%%%%%%%%%%%%%%
# Inclination [deg] --> external INPUT %%%%%%%%%%%%%%%
#
# R. asc. of asc. node [deg w.r.t. gamma50] --> 0
# Argument of perigee [deg] --> 0
# %Output resolution --> default
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%% saving INPUT %%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% Radiation Sources and effects %%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%% Radiation sources %%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%% Trapped Proton and Electron Fluxes %%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%% internal INPUT %%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %standard models
# Proton model --> AP-8
# Electron model --> AE-8
# Model version (proton) --> solar maximum
# Threshold flux for exposure (proton) --> 1
# Model version (electron) --> solar maximum
# do not include local time variation
# Confidence level --> 50.000%
# Threshold flux for exposure (electron) --> 1
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%% Short-term solar particle fluxes (only for SEU) %%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%% internal INPUT %%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Solar particle flux model --> CREME-96
# ion range --> H to Ni
# worst day
# Magnetic shielding --> default
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% Long-term solar particle fluences %%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%% internal INPUT %%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Solar particle model --> ESP-PSYCHIC (total fluence)
# ion range --> H to Ni
# Prediction period --> automatic
# offset in solar cycle --> automatic
# Confidence level [%] --> 95.0
# Magnetic shielding --> default
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%% Galactic cosmic ray fluxes %%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%% internal INPUT %%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# ion range --> H to Ni
# GCR model at 1 AU--> ISO 15390
# model -->ISO-15390 standard model
# solar activity data --> mission epoch
# Magnetic shielding --> default
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%% Long-term radiation doses %%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%% Ionizing dose for simple geometries %%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%% internal INPUT %%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Shielding depths --> table of values
# Al shielding --> external INPUT %%%%%%%%%%%%%%%
# Dose model --> SHIELDOSE-2
# Shielding configuration --> centre of Al spheres
# Target material --> Silicon
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%% Single Event Effects %%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%% Short-term SEU rates and LET spectra %%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%% internal INPUT %%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Device number (max 15) ---> INPUT external %%%%%%%%%%%
# Device material --> Si (CREME-86)
# Device source --> user defined
# Device Name ---> INPUT external %%%%%%%%%%%
# Shape Sensitive volume ---> rectangular parallelepiped (3D)
# Dimensions --> INPUT external %%%%%%%%
# %%%%Models Weibull function and Bendel function (cross section methods)
# Direct ionization upset rates ---> INPUT external %%%%%%%%%%%
# Algorithm --> constant LET (CREME)
# %%%%%Models Weibull function and Bendel function
# Proton induced upset rates --> INPUT external %%%%%%%%%%%
#
# % solar particles + trapped protons + GCR particles
# mission segment averages
#
# Al Equivalent shielding --> INPUT external %%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%% Long-term SEU rates and LET spectra %%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%% internal INPUT %%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Device number (max 15) ---> INPUT external %%%%%%%%%%%
# Device material --> Si (CREME-86)
# Device source --> user defined
# Device Name ---> INPUT external %%%%%%%%%%%
# Shape Sensitive volume ---> rectangular parallelepiped (3D)
# Dimensions --> INPUT external %%%%%%%%
# %%%%Models Weibull function and Bendel function (cross section methods)
# Direct ionization upset rates ---> INPUT external %%%%%%%%%%%
# Algorithm --> constant LET (CREME)
# %%%%Models Weibull function and Bendel function
# Proton induced upset rates --> INPUT external %%%%%%%%%%%
#
# solar particles + trapped protons + GCR particles
# mission segment averages
# Al Equivalent shielding --> INPUT external %%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
import requests
def do_post(url, user, password, values):
return requests.post(url, data=values, auth=(user, password))
def SPENVIS_interface_f(user, password, proj, lifetime, day, month, year, h, i, OMEGA, omega, theta):
url = 'https://www.spenvis.oma.be/htbin/spenvis.exe/' + proj
# personal username shall be selected by the user and inserted in ADV_USERinputCEDH.xml for radiation model
values = {
'action': 'cleanup',
'SWITCH': '1',
'TODELETE': proj,
'#cleanUp()#deleteFile(project.cgi)#ResetToPrevious(packages.html)': 'Execute'}
# ('Delete previous results')
do_post(url, user, password, values)
# ('Orbit Generator...');
# ('Input #1');
values = {
'_JS_SUBMIT': '#saveform(sapre_mis.html)#resetToPrevious(sapre_mis.html)',
'PLANET': '3',
'ORBGEN': '1',
'NTRAJ': '1',
'IMISD': '0',
'MISDUR': lifetime,
'JMISD':'0',
'KATT':'2',
'ISRP':'0',
'IDRAG':'0',
'#deleteFile(orbitp.cgi)#saveForm(sapre_mis.html,orbitn)#saveForm(sapre_mis.html)#ResetToPrevious(sapre_orb.html,orbit1)': 'Next >>',
'PRODEF': proj,
'PROTIT': '',
'ORBITN': '0'}
do_post(url, user, password, values)
# ('General circular orbit')
# ('Input #2...')
values = {
'_JS_SUBMIT': '#saveform(sapre_orb.html,orbit1)#resetToPrevious(sapre_orb.html,orbit1)',
'TITLE': '',
'TYPE': 'GEN',
'ISTART': '1',
'OEDAY': day,
'OEMON': month,
'OEYEAR': year,
'OEHRS': '0',
'OEMIN': '0',
'OESEC': '0',
'IDUR': '1',
'EPDUR': '1',
'IAE': '2',
'ALT': h,
'RINCL': i,
'IAN': '0',
'RAAN': OMEGA,
'ARGPER': omega,
'TRANO': theta,
'DT1': '60.0',
'DH2': '20000.0',
'DT2': '240.0',
'DH3': '80000.0',
'DT3': '3600.0',
'ORBITN': '1',
'#saveForm(sapre_orb.html,orbit1)#deleteFile(orbitp.cgi)#saveForm(sapre_orb.html,orbitn)#ResetToPrevious(sapre_sum.html)': 'Next >>',
'STEP': '0.5'}
do_post(url, user, password, values)
# ('Saving inputs...')
values = {
'_JS_SUBMIT': '#saveform(sapre_sum.html)#resetToPrevious(sapre_sum.html)',
'ORBITN': '2',
'#deleteFile(orbitn.cgi)#saveForm(sapre_sum.html,orbitp)#saveForm(sapre_sum.html)#namelist(mission[sapre_mis],sapre[orbit1])#runModel(sapre)#ResetToPrevious(sapre_out.html)': 'Run'}
do_post(url, user, password, values)
# ('Radiation sources')
# ('Trapped Proton and Electron Fluxes...')
values = {
'_JS_SUBMIT': '#saveform(trep_par.html)#resetToPrevious(trep_par.html)',
'TRPMOD': '1',
'TREMOD': '1',
'MINDP': '1',
'FLUXTHP': '1.00',
'MINDE': '1',
'ILTV': '0',
'ISIG': '0',
'FLUXTHE': '1.00',
'#saveForm(trep_par.html)#namelist(trep[trep_par.html])#deleteFile(ae9ap9_par.cgi)#runModel(trep)#ResetToPrevious(trep_out.html)': 'Run',
'NENERP': '30',
'PROEN(1)': '0.1',
'PROEN(2)': '0.15',
'PROEN(3)': '0.2',
'PROEN(4)': '0.3',
'PROEN(5)': '0.4',
'PROEN(6)': '0.5',
'PROEN(7)': '0.6',
'PROEN(8)': '0.7',
'PROEN(9)': '1.0',
'PROEN(10)': '1.5',
'PROEN(11)': '2.0',
'PROEN(12)': '3.0',
'PROEN(13)': '4.0',
'PROEN(14)': '5.0',
'PROEN(15)': '6.0',
'PROEN(16)': '7.0',
'PROEN(17)': '10.0',
'PROEN(18)': '15.0',
'PROEN(19)': '20.0',
'PROEN(20)': '30.0',
'PROEN(21)': '40.0',
'PROEN(22)': '50.0',
'PROEN(23)': '60.0',
'PROEN(24)': '70.0',
'PROEN(25)': '100.0',
'PROEN(26)': '150.0',
'PROEN(27)': '200.0',
'PROEN(28)': '300.0',
'PROEN(29)': '400.0',
'PROEN(30)': '500.0',
'NENERE': '30',
'ELEEN(1)': '0.04',
'ELEEN(2)': '0.1',
'ELEEN(3)': '0.2',
'ELEEN(4)': '0.3',
'ELEEN(5)': '0.4',
'ELEEN(6)': '0.5',
'ELEEN(7)': '0.6',
'ELEEN(8)': '0.7',
'ELEEN(9)': '0.8',
'ELEEN(10)': '1.0',
'ELEEN(11)': '1.25',
'ELEEN(12)': '1.5',
'ELEEN(13)': '1.75',
'ELEEN(14)': '2.0',
'ELEEN(15)': '2.25',
'ELEEN(16)': '2.5',
'ELEEN(17)': '2.75',
'ELEEN(18)': '3.0',
'ELEEN(19)': '3.25',
'ELEEN(20)': '3.5',
'ELEEN(21)': '3.75',
'ELEEN(22)': '4.0',
'ELEEN(23)': '4.25',
'ELEEN(24)': '4.5',
'ELEEN(25)': '4.75',
'ELEEN(26)': '5.0',
'ELEEN(27)': '5.5',
'ELEEN(28)': '6.0',
'ELEEN(29)': '6.5',
'ELEEN(30)': '7.0'}
do_post(url, user, password, values)
# ('Short-term solar particle fluxes...')
values = {
'_JS_SUBMIT': '#saveform(sepflare_par.html)#resetToPrevious(sepflare_par.html)',
'FLAREMOD': '1',
'ION1': '1',
'ION2': '92',
'CREME96': '2',
'#saveForm(sepflare_par.html)#namelist(sepflare[sepflare_par+magshielding_par])#RunModel(sepflare)#ResetToPrevious(sepflare_out.html)': 'Run',
'NENERS': '75',
'ENERFL(1)': '0.10',
'ENERFL(2)': '0.11',
'ENERFL(3)': '0.12',
'ENERFL(4)': '0.14',
'ENERFL(5)': '0.16',
'ENERFL(6)': '0.18',
'ENERFL(7)': '0.20',
'ENERFL(8)': '0.22',
'ENERFL(9)': '0.25',
'ENERFL(10)': '0.28',
'ENERFL(11)': '0.32',
'ENERFL(12)': '0.35',
'ENERFL(13)': '0.40',
'ENERFL(14)': '0.45',
'ENERFL(15)': '0.5',
'ENERFL(16)': '0.55',
'ENERFL(17)': '0.63',
'ENERFL(18)': '0.71',
'ENERFL(19)': '0.80',
'ENERFL(20)': '0.90',
'ENERFL(21)': '1.0',
'ENERFL(22)': '1.1',
'ENERFL(23)': '1.2',
'ENERFL(24)': '1.4',
'ENERFL(25)': '1.6',
'ENERFL(26)': '1.8',
'ENERFL(27)': '2.0',
'ENERFL(28)': '2.2',
'ENERFL(29)': '2.5',
'ENERFL(30)': '2.8',
'ENERFL(31)': '3.2',
'ENERFL(32)': '3.5',
'ENERFL(33)': '4.0',
'ENERFL(34)': '4.5',
'ENERFL(35)': '5.0',
'ENERFL(36)': '5.5',
'ENERFL(37)': '6.3',
'ENERFL(38)': '7.1',
'ENERFL(39)': '8.0',
'ENERFL(40)': '9.0',
'ENERFL(41)': '10.0',
'ENERFL(42)': '11.0',
'ENERFL(43)': '12.0',
'ENERFL(44)': '14.0',
'ENERFL(45)': '16.0',
'ENERFL(46)': '18.0',
'ENERFL(47)': '20.0',
'ENERFL(48)': '22.0',
'ENERFL(49)': '25.0',
'ENERFL(50)': '28.0',
'ENERFL(51)': '32.0',
'ENERFL(52)': '35.0',
'ENERFL(53)': '40.0',
'ENERFL(54)': '45.0',
'ENERFL(55)': '50.0',
'ENERFL(56)': '55.0',
'ENERFL(57)': '63.0',
'ENERFL(58)': '71.0',
'ENERFL(59)': '80.0',
'ENERFL(60)': '90.0',
'ENERFL(61)': '100.0',
'ENERFL(62)': '110.0',
'ENERFL(63)': '120.0',
'ENERFL(64)': '140.0',
'ENERFL(65)': '160.0',
'ENERFL(66)': '180.0',
'ENERFL(67)': '200.0',
'ENERFL(68)': '220.0',
'ENERFL(69)': '250.0',
'ENERFL(70)': '280.0',
'ENERFL(71)': '320.0',
'ENERFL(72)': '350.0',
'ENERFL(73)': '400.0',
'ENERFL(74)': '450.0',
'ENERFL(75)': '500.0'}
do_post(url, user, password, values)
# ('Long-term solar particle fluences...');
values = {
'_JS_SUBMIT': '#saveform(sepflare_par.html)#resetToPrevious(sepflare_par.html)',
'FLAMOD': '4',
'ION1': '1',
'ION2': '92',
'ITFLARE': '0',
'ISTART': '0',
'FLPROB': '95.0',
'#saveForm(sep_par.html)#namelist(sep[sep_par+magshielding_par])#RunModel(sep)#ResetToPrevious(sep_out.html)': 'Run',
'NENERS': '75',
'ENERFL(1)': '0.10',
'ENERFL(2)': '0.11',
'ENERFL(3)': '0.12',
'ENERFL(4)': '0.14',
'ENERFL(5)': '0.16',
'ENERFL(6)': '0.18',
'ENERFL(7)': '0.20',
'ENERFL(8)': '0.22',
'ENERFL(9)': '0.25',
'ENERFL(10)': '0.28',
'ENERFL(11)': '0.32',
'ENERFL(12)': '0.35',
'ENERFL(13)': '0.40',
'ENERFL(14)': '0.45',
'ENERFL(15)': '0.5',
'ENERFL(16)': '0.55',
'ENERFL(17)': '0.63',
'ENERFL(18)': '0.71',
'ENERFL(19)': '0.80',
'ENERFL(20)': '0.90',
'ENERFL(21)': '1.0',
'ENERFL(22)': '1.1',
'ENERFL(23)': '1.2',
'ENERFL(24)': '1.4',
'ENERFL(25)': '1.6',
'ENERFL(26)': '1.8',
'ENERFL(27)': '2.0',
'ENERFL(28)': '2.2',
'ENERFL(29)': '2.5',
'ENERFL(30)': '2.8',
'ENERFL(31)': '3.2',
'ENERFL(32)': '3.5',
'ENERFL(33)': '4.0',
'ENERFL(34)': '4.5',
'ENERFL(35)': '5.0',
'ENERFL(36)': '5.5',
'ENERFL(37)': '6.3',
'ENERFL(38)': '7.1',
'ENERFL(39)': '8.0',
'ENERFL(40)': '9.0',
'ENERFL(41)': '10.0',
'ENERFL(42)': '11.0',
'ENERFL(43)': '12.0',
'ENERFL(44)': '14.0',
'ENERFL(45)': '16.0',
'ENERFL(46)': '18.0',
'ENERFL(47)': '20.0',
'ENERFL(48)': '22.0',
'ENERFL(49)': '25.0',
'ENERFL(50)': '28.0',
'ENERFL(51)': '32.0',
'ENERFL(52)': '35.0',
'ENERFL(53)': '40.0',
'ENERFL(54)': '45.0',
'ENERFL(55)': '50.0',
'ENERFL(56)': '55.0',
'ENERFL(57)': '63.0',
'ENERFL(58)': '71.0',
'ENERFL(59)': '80.0',
'ENERFL(60)': '90.0',
'ENERFL(61)': '100.0',
'ENERFL(62)': '110.0',
'ENERFL(63)': '120.0',
'ENERFL(64)': '140.0',
'ENERFL(65)': '160.0',
'ENERFL(66)': '180.0',
'ENERFL(67)': '200.0',
'ENERFL(68)': '220.0',
'ENERFL(69)': '250.0',
'ENERFL(70)': '280.0',
'ENERFL(71)': '320.0',
'ENERFL(72)': '350.0',
'ENERFL(73)': '400.0',
'ENERFL(74)': '450.0',
'ENERFL(75)': '500.0'}
do_post(url, user, password, values)
# ('Galactic Cosmic Ray fluxes...');
values = {
'_JS_SUBMIT': '#saveform(gcr_par.html)#resetToPrevious(gcr_par.html)',
'IELM': '1',
'JELM': '92',
'GCRMOD': '3',
'MQ':'9677',
'#saveForm(gcr_par.html)#namelist(gcr[gcr_par+magshielding_par])#RunModel(gcr)#ResetToPrevious(gcr_out.html)': 'Run'}
do_post(url, user, password, values)
# ('Ionizing dose for simple geometries...');
values = {
'_JS_SUBMIT': '#saveform(dose_sd.html)#resetToPrevious(dose_sd.html)',
'JSHLD': '0',
'DOSMOD': '2',
'ISHLD': '3',
'IDET': '3',
'#saveForm(dose_sd.html)#saveForm(dose_sd.html,sd2)#namelist(sd2[dose_sd.html])#RunModel(sd2)#ResetToPrevious(sd2_out.html)': 'Run',
'SSAT_DOSE': '0'}
do_post(url, user, password, values)
# OUTPUT
# ('Save results from Ionizing dose for simple geometries...');
    query = '?%23sendResult(spenvis_s2o.txt)'
    results = requests.get(url + query, auth=(user, password))
    with open('resultsSPENVIS.txt', 'w') as results_file:
        results_file.write(results.text)
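# Example invocation (a minimal sketch: the credentials, project name and the
# 550 km / 97.6 deg circular orbit below are placeholder values, not part of the
# original interface; running this performs real requests against SPENVIS):
if __name__ == '__main__':
    SPENVIS_interface_f(user='my_user', password='my_password', proj='my_project',
                        lifetime=1, day=1, month=1, year=2022,
                        h=550, i=97.6, OMEGA=0.0, omega=0.0, theta=0.0)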
| python | 21,033 |
from .GridConfiguration import GridConfiguration
from data.scaling import ScalerType, StandardScaler, RangeScaler
from collections import namedtuple
grid_scaling = namedtuple("grid_scaling", ["grids", "scaler"])
__scaler_types__ = {
ScalerType.STANDARD: StandardScaler,
ScalerType.RANGE: RangeScaler
}
class GridConfigurator(object):
def __init__(self):
pass
def build_grids(self, config):
assert isinstance(config, dict)
assert "input_lr" in config or "input_hr" in config
assert "target" in config
input_grids_lr = []
input_scalings_lr = []
if "input_lr" in config:
grid_list = config["input_lr"]
input_grids_lr, input_scalings_lr = self._read_grid_list(grid_list)
input_grids_hr = []
input_scalings_hr = []
if "input_hr" in config:
grid_list = config["input_hr"]
input_grids_hr, input_scalings_hr = self._read_grid_list(grid_list)
grid_list = config["target"]
target_grids, target_scalings = self._read_grid_list(grid_list)
return GridConfiguration(
input_grids_lr, input_grids_hr, target_grids,
input_scalings_lr, input_scalings_hr, target_scalings
)
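    # A minimal sketch of the ``config`` layout accepted above (the grid names
    # and scaler specs are illustrative placeholders, not required values):
    #
    #   config = {
    #       "input_lr": [["temperature", ["standard"]], "orography"],
    #       "target": [["precipitation", {"type": "range"}]],
    #   }
    #   grids = GridConfigurator().build_grids(config)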
def _read_grid_list(self, grid_list):
grid_names = []
scalings = []
if grid_list is None:
return grid_names, scalings
if not isinstance(grid_list, (list, tuple)):
assert isinstance(grid_list, str)
grid_list = [grid_list]
for grid_config in grid_list:
if isinstance(grid_config, str):
current_grid_names = [grid_config]
scaler = None
elif isinstance(grid_config, list):
if len(grid_config) == 0:
continue
current_grid_names = grid_config[0]
if current_grid_names is None:
continue
if not isinstance(current_grid_names, list):
assert isinstance(current_grid_names, str)
current_grid_names = [current_grid_names]
scaler = None
if len(grid_config) > 1:
scaler_config = grid_config[1]
if scaler_config is not None:
channels = len(current_grid_names)
scaler = self._build_scaler(channels, grid_config[1])
if len(grid_config) > 2:
raise Exception("[ERROR] Unknown configuration format.")
else:
raise Exception("[ERROR] Unknown configuration format.")
grid_names += current_grid_names
scalings.append(grid_scaling(current_grid_names, scaler))
return grid_names, scalings
def _build_scaler(self, channels, scaler_config):
if scaler_config is None:
return None
if isinstance(scaler_config, str):
scaler_config = [scaler_config]
if isinstance(scaler_config, (list, tuple)):
if len(scaler_config) == 0:
return None
scaler_type = ScalerType(scaler_config[0].upper())
kwargs = {}
if len(scaler_config) > 1:
scaler_opts = scaler_config[1]
if scaler_opts is None:
scaler_opts = {}
assert isinstance(scaler_opts, dict)
kwargs.update(scaler_opts)
if len(scaler_config) > 2:
raise Exception("[ERROR] Unknown configuration format.")
elif isinstance(scaler_config, dict):
if len(scaler_config) == 0:
return None
assert "type" in scaler_config
scaler_type = ScalerType(scaler_config["type"].upper())
kwargs = {}
if "options" in scaler_config:
scaler_opts = scaler_config["options"]
assert isinstance(scaler_opts, dict)
kwargs.update(scaler_opts)
else:
raise Exception("[ERROR] Unknown configuration format.")
scaler_constructor = __scaler_types__[scaler_type]
        return scaler_constructor(channels=channels, **kwargs)
 | python | 4,247
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from .serializers import StandardSerializer, MarksheetSerializer
from rest_framework import viewsets
from .models import Marksheet, Standard
# Create your views here.
def index(request):
if not request.user.is_authenticated:
return render(request, "result/index.html")
else:
return render(request, "accounts/profile.html")
class MarksheetViewSet(viewsets.ModelViewSet):
queryset = Marksheet.objects.all()
serializer_class = MarksheetSerializer
class StandardViewSet(viewsets.ModelViewSet):
queryset = Standard.objects.all()
serializer_class = StandardSerializer
| python | 709 |
from opentrons import protocol_api
from opentrons.drivers.rpi_drivers import gpio
import time
import math
# Metadata
metadata = {
'protocolName': 'S3 Station C Version 1',
'author': 'Nick <[email protected]>, Sara <[email protected]>, Miguel <[email protected]>',
'source': 'Custom Protocol Request',
'apiLevel': '2.1'
}
# Parameters to adapt the protocol
NUM_SAMPLES = 96
MM_LABWARE = 'opentrons aluminum block'
MMTUBE_LABWARE = '2ml tubes'
PCR_LABWARE = 'opentrons aluminum nest plate'
ELUTION_LABWARE = 'opentrons aluminum nest plate'
PREPARE_MASTERMIX = True
MM_TYPE = 'MM1'
TRANSFER_MASTERMIX = True
TRANSFER_SAMPLES = True
"""
NUM_SAMPLES is the number of samples, must be an integer number
MM_LABWARE must be one of the following:
opentrons plastic block
pentrons aluminum block
covidwarriors aluminum block
MMTUBE_LABWARE must be one of the following:
2ml tubes
PCR_LABWARE must be one of the following:
opentrons aluminum biorad plate
opentrons aluminum nest plate
opentrons aluminum strip short
covidwarriors aluminum biorad plate
covidwarriors aluminum biorad strip short
ELUTION_LABWARE must be one of the following:
opentrons plastic 2ml tubes
opentrons plastic 1.5ml tubes
opentrons aluminum 2ml tubes
opentrons aluminum 1.5ml tubes
covidwarriors aluminum 2ml tubes
covidwarriors aluminum 1.5ml tubes
opentrons aluminum biorad plate
opentrons aluminum nest plate
covidwarriors aluminum biorad plate
opentrons aluminum strip alpha
opentrons aluminum strip short
covidwarriors aluminum biorad strip alpha
covidwarriors aluminum biorad strip short
PREPARE_MASTERMIX: True or False
MM_TYPE must be one of the following:
MM1
MM2
MM3
TRANSFER_MASTERMIX: True or False
TRANSFER_SAMPLES: True or False
"""
# Calculated variables
if MM_TYPE == 'MM3':
VOLUME_MMIX = 15
else:
VOLUME_MMIX = 20
# Constants
MM_LW_DICT = {
'opentrons plastic block': 'opentrons_24_tuberack_generic_2ml_screwcap',
'opentrons aluminum block': 'opentrons_24_aluminumblock_generic_2ml_screwcap',
'covidwarriors aluminum block': 'covidwarriors_aluminumblock_24_screwcap_2000ul'
}
PCR_LW_DICT = {
'opentrons aluminum biorad plate': 'opentrons_96_aluminumblock_biorad_wellplate_200ul',
'opentrons aluminum nest plate': 'opentrons_96_aluminumblock_nest_wellplate_100ul',
'opentrons aluminum strip short': 'opentrons_aluminumblock_96_pcrstrips_100ul',
'covidwarriors aluminum biorad plate': 'covidwarriors_aluminumblock_96_bioradwellplate_200ul',
'covidwarriors aluminum biorad strip short': 'covidwarriors_aluminumblock_96_bioradwellplate_pcrstrips_100ul'
}
EL_LW_DICT = {
# tubes
'opentrons plastic 2ml tubes': 'opentrons_24_tuberack_generic_2ml_screwcap',
'opentrons plastic 1.5ml tubes': 'opentrons_24_tuberack_nest_1.5ml_screwcap',
'opentrons aluminum 2ml tubes': 'opentrons_24_aluminumblock_generic_2ml_screwcap',
'opentrons aluminum 1.5ml tubes': 'opentrons_24_aluminumblock_nest_1.5ml_screwcap',
'covidwarriors aluminum 2ml tubes': 'covidwarriors_aluminumblock_24_screwcap_2000ul',
'covidwarriors aluminum 1.5ml tubes': 'covidwarriors_aluminumblock_24_screwcap_2000ul',
# PCR plate
'opentrons aluminum biorad plate': 'opentrons_96_aluminumblock_biorad_wellplate_200ul',
'opentrons aluminum nest plate': 'opentrons_96_aluminumblock_nest_wellplate_100ul',
'covidwarriors aluminum biorad plate': 'covidwarriors_aluminumblock_96_bioradwellplate_200ul',
# Strips
#'large strips': 'opentrons_96_aluminumblock_generic_pcr_strip_200ul',
#'short strips': 'opentrons_96_aluminumblock_generic_pcr_strip_200ul',
'opentrons aluminum strip alpha': 'opentrons_aluminumblock_96_pcrstripsalpha_200ul',
'opentrons aluminum strip short': 'opentrons_aluminumblock_96_pcrstrips_100ul',
'covidwarriors aluminum biorad strip alpha': 'covidwarriors_aluminumblock_96_bioradwellplate_pcrstripsalpha_200ul',
'covidwarriors aluminum biorad strip short': 'covidwarriors_aluminumblock_96_bioradwellplate_pcrstrips_100ul'
}
MMTUBE_LW_DICT = {
# Radius of each possible tube
'2ml tubes': 4
}
# Function definitions
def check_door():
return gpio.read_window_switches()
def confirm_door_is_closed(ctx):
#Check if door is opened
if check_door() == False:
#Set light color to red and pause
gpio.set_button_light(1,0,0)
        ctx.pause("Please close the door")
time.sleep(3)
confirm_door_is_closed(ctx)
else:
#Set light color to green
gpio.set_button_light(0,1,0)
def finish_run():
#Set light color to blue
gpio.set_button_light(0,0,1)
def get_source_dest_coordinates(ELUTION_LABWARE, source_racks, pcr_plate):
if 'strip' in ELUTION_LABWARE:
sources = [
tube
for i, rack in enumerate(source_racks)
for col in [
rack.columns()[c] if i < 2 else rack.columns()[c+1]
for c in [0, 5, 10]
]
for tube in col
][:NUM_SAMPLES]
dests = pcr_plate.wells()[:NUM_SAMPLES]
elif 'plate' in ELUTION_LABWARE:
sources = source_racks.wells()[:NUM_SAMPLES]
dests = pcr_plate.wells()[:NUM_SAMPLES]
else:
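        # Tube racks: map the tubes onto the PCR plate as four 6-column x 4-row
        # quadrants (2 x 2 blocks), one quadrant per 24-tube source rack.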
sources = [
tube
for rack in source_racks for tube in rack.wells()][:NUM_SAMPLES]
dests = [
well
for v_block in range(2)
for h_block in range(2)
for col in pcr_plate.columns()[6*v_block:6*(v_block+1)]
for well in col[4*h_block:4*(h_block+1)]][:NUM_SAMPLES]
return sources, dests
def get_mm_height(volume):
# depending on the volume in tube, get mm fluid height
height = volume // (3.14 * (MMTUBE_LW_DICT[MMTUBE_LABWARE] ** 2))
height -= 18
if height < 5:
return 1
else:
return height
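# Worked example with the defaults above: 96 samples * 20 uL = 1920 uL of
# mastermix in a '2ml tubes' rack (radius 4 mm), so the aspiration height is
# 1920 // (3.14 * 4**2) - 18 ~= 20 mm.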
def homogenize_mm(mm_tube, p300, times=5):
# homogenize mastermix tube a given number of times
p300.pick_up_tip()
volume = VOLUME_MMIX * NUM_SAMPLES
volume_height = get_mm_height(volume)
#p300.mix(5, 200, mm_tube.bottom(5))
for i in range(times):
for j in range(5):
            # depending on the number of samples, start at a different height and move as it aspirates
if volume_height < 12:
p300.aspirate(40, mm_tube.bottom(1))
else:
aspirate_height = volume_height-(3*j)
p300.aspirate(40, mm_tube.bottom(aspirate_height))
        # empty the pipette
p300.dispense(200, mm_tube.bottom(volume_height))
    # blow out before dropping tip
p300.blow_out(mm_tube.top(-2))
p300.drop_tip()
def prepare_mastermix(MM_TYPE, mm_rack, p300, p20):
# setup mastermix coordinates
""" mastermix component maps """
mm1 = {
tube: vol
for tube, vol in zip(
[well for col in mm_rack.columns()[2:5] for well in col][:10],
[2.85, 12.5, 0.4, 1, 1, 0.25, 0.25, 0.5, 0.25, 1]
)
}
mm2 = {
tube: vol
for tube, vol in zip(
[mm_rack.wells_by_name()[well] for well in ['A3', 'C5', 'D5']],
[10, 4, 1]
)
}
mm3 = {
tube: vol
for tube, vol in zip(
[mm_rack.wells_by_name()[well] for well in ['A6', 'B6']],
[13, 2]
)
}
mm_dict = {'MM1': mm1, 'MM2': mm2, 'MM3': mm3}
# create mastermix
mm_tube = mm_rack.wells()[0]
for tube, vol in mm_dict[MM_TYPE].items():
mm_vol = vol*(NUM_SAMPLES+5)
disp_loc = mm_tube.top(-10)
pip = p300 if mm_vol > 20 else p20
pip.pick_up_tip()
#pip.transfer(mm_vol, tube.bottom(0.5), disp_loc, air_gap=2, touch_tip=True, new_tip='never')
air_gap_vol = 5
num_transfers = math.ceil(mm_vol/(200-air_gap_vol))
for i in range(num_transfers):
if i == 0:
transfer_vol = mm_vol % (200-air_gap_vol)
else:
transfer_vol = (200-air_gap_vol)
pip.transfer(transfer_vol, tube.bottom(0.5), disp_loc, air_gap=air_gap_vol, new_tip='never')
pip.blow_out(disp_loc)
pip.aspirate(5, mm_tube.top(2))
pip.drop_tip()
# homogenize mastermix
homogenize_mm(mm_tube, p300)
return mm_tube
def transfer_mastermix(mm_tube, dests, VOLUME_MMIX, p300, p20):
max_trans_per_asp = 8 #230//(VOLUME_MMIX+5)
split_ind = [ind for ind in range(0, NUM_SAMPLES, max_trans_per_asp)]
dest_sets = [dests[split_ind[i]:split_ind[i+1]]
for i in range(len(split_ind)-1)] + [dests[split_ind[-1]:]]
pip = p300 if VOLUME_MMIX >= 20 else p20
pip.pick_up_tip()
    # get initial fluid height to avoid overflowing mm when aspirating
mm_volume = VOLUME_MMIX * NUM_SAMPLES
volume_height = get_mm_height(mm_volume)
for set in dest_sets:
        # check height and if it is low enough, aim for the bottom
if volume_height < 5:
disp_loc = mm_tube.bottom(1)
else:
            # recalculate volume height
mm_volume -= VOLUME_MMIX * max_trans_per_asp
volume_height = get_mm_height(mm_volume)
disp_loc = mm_tube.bottom(volume_height)
pip.aspirate(4, disp_loc)
pip.distribute(VOLUME_MMIX, disp_loc, [d.bottom(2) for d in set],
air_gap=1, disposal_volume=0, new_tip='never')
pip.blow_out(disp_loc)
pip.drop_tip()
def transfer_samples(ELUTION_LABWARE, sources, dests, p20):
    # height for aspiration has to be different depending on whether you are using tubes or wells
if 'strip' in ELUTION_LABWARE or 'plate' in ELUTION_LABWARE:
height = 1.5
else:
height = 1
# transfer
for s, d in zip(sources, dests):
p20.pick_up_tip()
p20.transfer(7, s.bottom(height), d.bottom(2), air_gap=2, new_tip='never')
#p20.mix(1, 10, d.bottom(2))
#p20.blow_out(d.top(-2))
p20.aspirate(1, d.top(-2))
p20.drop_tip()
# RUN PROTOCOL
def run(ctx: protocol_api.ProtocolContext):
# confirm door is closed
if not ctx.is_simulating():
confirm_door_is_closed(ctx)
# define tips
tips20 = [
ctx.load_labware('opentrons_96_filtertiprack_20ul', slot)
for slot in ['6', '9', '8', '7']
]
tips300 = [ctx.load_labware('opentrons_96_filtertiprack_200ul', '3')]
# define pipettes
p20 = ctx.load_instrument('p20_single_gen2', 'right', tip_racks=tips20)
p300 = ctx.load_instrument('p300_single_gen2', 'left', tip_racks=tips300)
# tempdeck module
tempdeck = ctx.load_module('tempdeck', '10')
#tempdeck.set_temperature(4)
# check mastermix labware type
if MM_LABWARE not in MM_LW_DICT:
raise Exception('Invalid MM_LABWARE. Must be one of the \
following:\nopentrons plastic block\nopentrons aluminum block\ncovidwarriors aluminum block')
# load mastermix labware
mm_rack = ctx.load_labware(
MM_LW_DICT[MM_LABWARE], '11',
MM_LABWARE)
# check mastermix tube labware type
if MMTUBE_LABWARE not in MMTUBE_LW_DICT:
raise Exception('Invalid MMTUBE_LABWARE. Must be one of the \
following:\n2ml tubes')
    # This one is not loaded, it contains the radius of each tube to calculate volume height
# check pcr plate
if PCR_LABWARE not in PCR_LW_DICT:
raise Exception('Invalid PCR_LABWARE. Must be one of the \
following:\nopentrons aluminum biorad plate\nopentrons aluminum nest plate\nopentrons aluminum strip short\ncovidwarriors aluminum biorad plate\ncovidwarriors aluminum biorad strip short')
# load pcr plate
pcr_plate = tempdeck.load_labware(
PCR_LW_DICT[PCR_LABWARE], 'PCR plate')
# check source (elution) labware type
if ELUTION_LABWARE not in EL_LW_DICT:
raise Exception('Invalid ELUTION_LABWARE. Must be one of the \
following:\nopentrons plastic 2ml tubes\nopentrons plastic 1.5ml tubes\nopentrons aluminum 2ml tubes\nopentrons aluminum 1.5ml tubes\ncovidwarriors aluminum 2ml tubes\ncovidwarriors aluminum 1.5ml tubes\nopentrons aluminum biorad plate\nopentrons aluminum nest plate\ncovidwarriors aluminum biorad plate\nopentrons aluminum strip alpha\nopentrons aluminum strip short\ncovidwarriors aluminum biorad strip alpha\ncovidwarriors aluminum biorad strip short')
# load elution labware
if 'plate' in ELUTION_LABWARE:
source_racks = ctx.load_labware(
EL_LW_DICT[ELUTION_LABWARE], '1',
'RNA elution labware')
else:
source_racks = [
ctx.load_labware(EL_LW_DICT[ELUTION_LABWARE], slot,
'RNA elution labware ' + str(i+1))
for i, slot in enumerate(['4', '1', '5', '2'])
]
# setup sample sources and destinations
sources, dests = get_source_dest_coordinates(ELUTION_LABWARE, source_racks, pcr_plate)
# prepare mastermix
if PREPARE_MASTERMIX:
mm_tube = prepare_mastermix(MM_TYPE, mm_rack, p300, p20)
else:
mm_tube = mm_rack.wells()[0]
if TRANSFER_MASTERMIX:
homogenize_mm(mm_tube, p300)
# transfer mastermix
if TRANSFER_MASTERMIX:
transfer_mastermix(mm_tube, dests, VOLUME_MMIX, p300, p20)
# transfer samples to corresponding locations
if TRANSFER_SAMPLES:
transfer_samples(ELUTION_LABWARE, sources, dests, p20)
finish_run()
| python | 13,450 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import argh
import os
import itertools
import re
import numpy as np
from tqdm import tqdm
def crawl(sgf_directory='sgf', print_summary=True):
max_w_upset = {'value': 0}
max_b_upset = {'value': 0}
worst_qs = []
tot_files = 0
num_resign_disabled = 0
bad_resigns = 0
bad_resign_files = []
other_thresh = 0.9
def sgfs(root, fils): return [os.path.join(root, f)
for f in fils if f.endswith('.sgf')]
fs = [i for sublist in [sgfs(root, files) for root, _, files in os.walk(
sgf_directory)] for i in sublist]
for filename in tqdm(fs):
data = open(filename).read()
result = re.search("RE\[([BWbw])\+", data)
if not result:
print("No result string found in sgf: ", filename)
continue
else:
result = result.group(1)
threshold = re.search("Resign Threshold: -(\d.\d*)", data)
if not threshold:
print("No threshold found for ", filename)
else:
threshold = float(threshold.group(1))
if threshold == 1.0:
num_resign_disabled += 1
tot_files += 1
q_values = list(map(float, re.findall("C\[(-?\d.\d*)", data)))
if result == "B":
look_for = min
else:
look_for = max
#print("%s:%s+:%s" % (filename, result, min(q_values)))
worst_qs.append(look_for(q_values))
if threshold == 1.0 and abs(look_for(q_values)) > other_thresh:
bad_resigns += 1
bad_resign_files.append(filename)
if look_for == min and min(q_values) < max_b_upset['value']:
max_b_upset = {"filename": filename,
"value": look_for(q_values)}
elif look_for == max and max(q_values) > max_w_upset['value']:
max_w_upset = {"filename": filename,
"value": max(q_values)}
if print_summary:
b_upsets = np.array([q for q in worst_qs if q < 0])
w_upsets = np.array([q for q in worst_qs if q > 0])
both = np.array(list(map(abs, worst_qs)))
print("Biggest w upset:", max_w_upset)
print("Biggest b upset:", max_b_upset)
print("99th percentiles (both/w/b)")
print(np.percentile(both, 99))
print(np.percentile(b_upsets, 1))
print(np.percentile(w_upsets, 99))
print("Bad resigns: {} / {} ({:.2f}%) ".format(bad_resigns,
num_resign_disabled, (bad_resigns / (num_resign_disabled+1)) * 100.0))
print("Total files:", tot_files)
print(bad_resign_files)
if __name__ == '__main__':
argh.dispatch_command(crawl)
| python | 3,319 |
import os
from datetime import datetime, timedelta
from airflow import DAG
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow_utils import DATA_IMAGE, clone_repo_cmd, gitlab_defaults, slack_failed_task
from kube_secrets import (
SNOWFLAKE_ACCOUNT,
SNOWFLAKE_LOAD_DATABASE,
SNOWFLAKE_LOAD_WAREHOUSE,
SNOWFLAKE_PASSWORD,
SNOWFLAKE_USER,
)
# Load the env vars into a dict and set Secrets
env = os.environ.copy()
pod_env_vars = {
"CI_PROJECT_DIR": "/analytics",
"SNOWFLAKE_TRANSFORM_DATABASE": "ANALYTICS",
}
# Default arguments for the DAG
default_args = {
"catchup": False,
"depends_on_past": False,
"on_failure_callback": slack_failed_task,
"owner": "airflow",
"retries": 0,
"retry_delay": timedelta(minutes=1),
"start_date": datetime(2019, 1, 1),
"dagrun_timeout": timedelta(hours=2),
}
# Create the DAG
dag = DAG("snowflake_cleanup", default_args=default_args, schedule_interval="0 5 * * 0")
# Task 1
drop_clones_cmd = f"""
{clone_repo_cmd} &&
analytics/orchestration/drop_snowflake_objects.py drop_databases
"""
purge_clones = KubernetesPodOperator(
**gitlab_defaults,
image=DATA_IMAGE,
task_id="purge-clones",
name="purge-clones",
secrets=[
SNOWFLAKE_USER,
SNOWFLAKE_PASSWORD,
SNOWFLAKE_ACCOUNT,
SNOWFLAKE_LOAD_DATABASE,
SNOWFLAKE_LOAD_WAREHOUSE,
],
env_vars=pod_env_vars,
arguments=[drop_clones_cmd],
dag=dag,
)
# Task 2
drop_dev_cmd = f"""
{clone_repo_cmd} &&
analytics/orchestration/drop_snowflake_objects.py drop_dev_schemas
"""
purge_dev_schemas = KubernetesPodOperator(
**gitlab_defaults,
image=DATA_IMAGE,
task_id="purge-dev-schemas",
name="purge-dev-schemas",
secrets=[
SNOWFLAKE_USER,
SNOWFLAKE_PASSWORD,
SNOWFLAKE_ACCOUNT,
SNOWFLAKE_LOAD_DATABASE,
SNOWFLAKE_LOAD_WAREHOUSE,
],
env_vars=pod_env_vars,
arguments=[drop_dev_cmd],
dag=dag,
)
| python | 2,030 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_get_by_pet_id_request(
pet_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/extensibleenums/pet/{petId}')
path_format_arguments = {
"petId": _SERIALIZER.url("pet_id", pet_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_add_pet_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/extensibleenums/pet/addPet')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
# fmt: on
class PetOperations(object):
"""PetOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~extensibleenumsswagger.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_by_pet_id(
self,
pet_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Pet"
"""get pet by id.
:param pet_id: Pet id.
:type pet_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Pet, or the result of cls(response)
:rtype: ~extensibleenumsswagger.models.Pet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType["_models.Pet"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_get_by_pet_id_request(
pet_id=pet_id,
template_url=self.get_by_pet_id.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize("Pet", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_pet_id.metadata = {"url": "/extensibleenums/pet/{petId}"} # type: ignore
@distributed_trace
def add_pet(
self,
pet_param=None, # type: Optional["_models.Pet"]
**kwargs # type: Any
):
# type: (...) -> "_models.Pet"
"""add pet.
:param pet_param: pet param.
:type pet_param: ~extensibleenumsswagger.models.Pet
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Pet, or the result of cls(response)
:rtype: ~extensibleenumsswagger.models.Pet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType["_models.Pet"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
if pet_param is not None:
_json = self._serialize.body(pet_param, "Pet")
else:
_json = None
request = build_add_pet_request(
content_type=content_type,
json=_json,
template_url=self.add_pet.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize("Pet", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
add_pet.metadata = {"url": "/extensibleenums/pet/addPet"} # type: ignore
| python | 7,086 |
"""
Stability metric example
"""
import os.path as path
import numpy as np
import matplotlib.pyplot as plt
from stability_evaluation import (RecordingBatchIterator, MeanWaveCalculator,
RecordingAugmentation,
SpikeSortingEvaluation)
ROOT = path.join(path.expanduser('~'), 'data/yass')
path_to_spike_train = path.join(ROOT, 'ej49_spikeTrain1_1.csv')
path_to_data = path.join(ROOT, 'ej49_data1_set1.bin')
path_to_geom = path.join(ROOT, 'ej49_geometry1.txt')
path_to_augmented = path.join(ROOT, 'augmented.bin')
spike_train = np.loadtxt(path_to_spike_train, dtype='int32', delimiter=',')
spike_train
br = RecordingBatchIterator(path_to_data, path_to_geom, sample_rate=30000,
batch_time_samples=1000000, n_batches=5,
n_chan=200, radius=100, whiten=False)
mwc = MeanWaveCalculator(br, spike_train)
# plot some of the recovered templates
for i in range(2):
plt.plot(mwc.templates[:, :, i])
plt.show()
# Here we indicate the length of the augmented data in terms of batches
# (with respect to the batch iterator object).
stab = RecordingAugmentation(mwc, augment_rate=0.25, move_rate=0.2)
# New ground truth spike train
new_gt_spt, status = stab.save_augment_recording(path_to_augmented, 5)
# Creating evaluation object for matching, TP, and FP
spt_ = spike_train[spike_train[:, 0] < 1e6, :]
tmp_ = mwc.templates[:, :, np.unique(spt_[:, 1])]
# Let's create a fake new spike train with only the first 100
# units of the ground truth as clusters
spt_2 = spt_[spt_[:, 1] < 100, :]
tmp_2 = tmp_[:, :, :100]
# Here we just demonstrate with the same spike train
# The second argument should be a different spike train
ev = SpikeSortingEvaluation(spt_, spt_2, tmp_, tmp_2)
print(ev.true_positive)
print(ev.false_positive)
print(ev.unit_cluster_map)
| python | 1,887 |
CABLETERMINATION = """
{% if value %}
<a href="{{ value.parent.get_absolute_url }}">{{ value.parent }}</a>
<i class="mdi mdi-chevron-right"></i>
<a href="{{ value.get_absolute_url }}">{{ value }}</a>
{% else %}
—
{% endif %}
"""
CABLE_LENGTH = """
{% if record.length %}{{ record.length }} {{ record.get_length_unit_display }}{% else %}—{% endif %}
"""
CABLE_TERMINATION_PARENT = """
{% if value.device %}
<a href="{{ value.device.get_absolute_url }}">{{ value.device }}</a>
{% elif value.circuit %}
<a href="{{ value.circuit.get_absolute_url }}">{{ value.circuit }}</a>
{% elif value.power_panel %}
<a href="{{ value.power_panel.get_absolute_url }}">{{ value.power_panel }}</a>
{% endif %}
"""
DEVICE_LINK = """
<a href="{% url 'dcim:device' pk=record.pk %}">
{{ record.name|default:'<span class="label label-info">Unnamed device</span>' }}
</a>
"""
DEVICEBAY_STATUS = """
{% if record.installed_device_id %}
<span class="label label-{{ record.installed_device.get_status_class }}">
{{ record.installed_device.get_status_display }}
</span>
{% else %}
<span class="label label-default">Vacant</span>
{% endif %}
"""
INTERFACE_IPADDRESSES = """
{% for ip in record.ip_addresses.all %}
<a href="{{ ip.get_absolute_url }}">{{ ip }}</a><br />
{% endfor %}
"""
INTERFACE_TAGGED_VLANS = """
{% if record.mode == 'tagged' %}
{% for vlan in record.tagged_vlans.all %}
<a href="{{ vlan.get_absolute_url }}">{{ vlan }}</a><br />
{% endfor %}
{% elif record.mode == 'tagged-all' %}
All
{% else %}
—
{% endif %}
"""
MPTT_LINK = """
{% if record.get_children %}
<span style="padding-left: {{ record.get_ancestors|length }}0px "><i class="mdi mdi-chevron-right"></i>
{% else %}
<span style="padding-left: {{ record.get_ancestors|length }}9px">
{% endif %}
<a href="{{ record.get_absolute_url }}">{{ record.name }}</a>
</span>
"""
POWERFEED_CABLE = """
<a href="{{ value.get_absolute_url }}">{{ value }}</a>
<a href="{% url 'dcim:powerfeed_trace' pk=record.pk %}" class="btn btn-primary btn-xs" title="Trace">
<i class="mdi mdi-transit-connection-variant" aria-hidden="true"></i>
</a>
"""
POWERFEED_CABLETERMINATION = """
<a href="{{ value.parent.get_absolute_url }}">{{ value.parent }}</a>
<i class="mdi mdi-chevron-right"></i>
<a href="{{ value.get_absolute_url }}">{{ value }}</a>
"""
RACKGROUP_ELEVATIONS = """
<a href="{% url 'dcim:rack_elevation_list' %}?site={{ record.site.slug }}&group_id={{ record.pk }}" class="btn btn-xs btn-primary" title="View elevations">
<i class="mdi mdi-server"></i>
</a>
"""
UTILIZATION_GRAPH = """
{% load helpers %}
{% utilization_graph value %}
"""
#
# Device component buttons
#
CONSOLEPORT_BUTTONS = """
{% if record.cable %}
<a href="{% url 'dcim:consoleport_trace' pk=record.pk %}" class="btn btn-primary btn-xs" title="Trace"><i class="mdi mdi-transit-connection-variant"></i></a>
{% include 'dcim/inc/cable_toggle_buttons.html' with cable=record.cable %}
{% elif perms.dcim.add_cable %}
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-transit-connection-variant" aria-hidden="true"></i></a>
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-lan-connect" aria-hidden="true"></i></a>
<span class="dropdown">
<button type="button" class="btn btn-success btn-xs dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<span class="mdi mdi-ethernet-cable" aria-hidden="true"></span>
</button>
<ul class="dropdown-menu dropdown-menu-right">
<li><a href="{% url 'dcim:consoleport_connect' termination_a_id=record.pk termination_b_type='console-server-port' %}?return_url={% url 'dcim:device_consoleports' pk=object.pk %}">Console Server Port</a></li>
<li><a href="{% url 'dcim:consoleport_connect' termination_a_id=record.pk termination_b_type='front-port' %}?return_url={% url 'dcim:device_consoleports' pk=object.pk %}">Front Port</a></li>
<li><a href="{% url 'dcim:consoleport_connect' termination_a_id=record.pk termination_b_type='rear-port' %}?return_url={% url 'dcim:device_consoleports' pk=object.pk %}">Rear Port</a></li>
</ul>
</span>
{% endif %}
"""
CONSOLESERVERPORT_BUTTONS = """
{% if record.cable %}
<a href="{% url 'dcim:consoleserverport_trace' pk=record.pk %}" class="btn btn-primary btn-xs" title="Trace"><i class="mdi mdi-transit-connection-variant"></i></a>
{% include 'dcim/inc/cable_toggle_buttons.html' with cable=record.cable %}
{% elif perms.dcim.add_cable %}
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-transit-connection-variant" aria-hidden="true"></i></a>
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-lan-connect" aria-hidden="true"></i></a>
<span class="dropdown">
<button type="button" class="btn btn-success btn-xs dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<span class="mdi mdi-ethernet-cable" aria-hidden="true"></span>
</button>
<ul class="dropdown-menu dropdown-menu-right">
<li><a href="{% url 'dcim:consoleserverport_connect' termination_a_id=record.pk termination_b_type='console-port' %}?return_url={% url 'dcim:device_consoleserverports' pk=object.pk %}">Console Port</a></li>
<li><a href="{% url 'dcim:consoleserverport_connect' termination_a_id=record.pk termination_b_type='front-port' %}?return_url={% url 'dcim:device_consoleserverports' pk=object.pk %}">Front Port</a></li>
<li><a href="{% url 'dcim:consoleserverport_connect' termination_a_id=record.pk termination_b_type='rear-port' %}?return_url={% url 'dcim:device_consoleserverports' pk=object.pk %}">Rear Port</a></li>
</ul>
</span>
{% endif %}
"""
POWERPORT_BUTTONS = """
{% if record.cable %}
<a href="{% url 'dcim:powerport_trace' pk=record.pk %}" class="btn btn-primary btn-xs" title="Trace"><i class="mdi mdi-transit-connection-variant"></i></a>
{% include 'dcim/inc/cable_toggle_buttons.html' with cable=record.cable %}
{% elif perms.dcim.add_cable %}
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-transit-connection-variant" aria-hidden="true"></i></a>
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-lan-connect" aria-hidden="true"></i></a>
<span class="dropdown">
<button type="button" class="btn btn-success btn-xs dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<span class="mdi mdi-ethernet-cable" aria-hidden="true"></span>
</button>
<ul class="dropdown-menu dropdown-menu-right">
<li><a href="{% url 'dcim:powerport_connect' termination_a_id=record.pk termination_b_type='power-outlet' %}?return_url={% url 'dcim:device_powerports' pk=object.pk %}">Power Outlet</a></li>
<li><a href="{% url 'dcim:powerport_connect' termination_a_id=record.pk termination_b_type='power-feed' %}?return_url={% url 'dcim:device_powerports' pk=object.pk %}">Power Feed</a></li>
</ul>
</span>
{% endif %}
"""
POWEROUTLET_BUTTONS = """
{% if record.cable %}
<a href="{% url 'dcim:poweroutlet_trace' pk=record.pk %}" class="btn btn-primary btn-xs" title="Trace"><i class="mdi mdi-transit-connection-variant"></i></a>
{% include 'dcim/inc/cable_toggle_buttons.html' with cable=record.cable %}
{% elif perms.dcim.add_cable %}
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-transit-connection-variant" aria-hidden="true"></i></a>
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-lan-connect" aria-hidden="true"></i></a>
<a href="{% url 'dcim:poweroutlet_connect' termination_a_id=record.pk termination_b_type='power-port' %}?return_url={% url 'dcim:device_poweroutlets' pk=object.pk %}" title="Connect" class="btn btn-success btn-xs">
<i class="mdi mdi-ethernet-cable" aria-hidden="true"></i>
</a>
{% endif %}
"""
INTERFACE_BUTTONS = """
{% if perms.ipam.add_ipaddress %}
<a href="{% url 'ipam:ipaddress_add' %}?interface={{ record.pk }}&return_url={% url 'dcim:device_interfaces' pk=object.pk %}" class="btn btn-xs btn-success" title="Add IP address">
<i class="mdi mdi-plus-thick" aria-hidden="true"></i>
</a>
{% endif %}
{% if record.cable %}
<a href="{% url 'dcim:interface_trace' pk=record.pk %}" class="btn btn-primary btn-xs" title="Trace"><i class="mdi mdi-transit-connection-variant"></i></a>
{% include 'dcim/inc/cable_toggle_buttons.html' with cable=record.cable %}
{% elif record.is_connectable and perms.dcim.add_cable %}
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-transit-connection-variant" aria-hidden="true"></i></a>
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-lan-connect" aria-hidden="true"></i></a>
<span class="dropdown">
<button type="button" class="btn btn-success btn-xs dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<span class="mdi mdi-ethernet-cable" aria-hidden="true"></span>
</button>
<ul class="dropdown-menu dropdown-menu-right">
<li><a href="{% url 'dcim:interface_connect' termination_a_id=record.pk termination_b_type='interface' %}?return_url={% url 'dcim:device_interfaces' pk=object.pk %}">Interface</a></li>
<li><a href="{% url 'dcim:interface_connect' termination_a_id=record.pk termination_b_type='front-port' %}?return_url={% url 'dcim:device_interfaces' pk=object.pk %}">Front Port</a></li>
<li><a href="{% url 'dcim:interface_connect' termination_a_id=record.pk termination_b_type='rear-port' %}?return_url={% url 'dcim:device_interfaces' pk=object.pk %}">Rear Port</a></li>
<li><a href="{% url 'dcim:interface_connect' termination_a_id=record.pk termination_b_type='circuit-termination' %}?return_url={% url 'dcim:device_interfaces' pk=object.pk %}">Circuit Termination</a></li>
</ul>
</span>
{% endif %}
"""
FRONTPORT_BUTTONS = """
{% if record.cable %}
<a href="{% url 'dcim:frontport_trace' pk=record.pk %}" class="btn btn-primary btn-xs" title="Trace"><i class="mdi mdi-transit-connection-variant"></i></a>
{% include 'dcim/inc/cable_toggle_buttons.html' with cable=record.cable %}
{% elif perms.dcim.add_cable %}
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-transit-connection-variant" aria-hidden="true"></i></a>
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-lan-connect" aria-hidden="true"></i></a>
<span class="dropdown">
<button type="button" class="btn btn-success btn-xs dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<span class="mdi mdi-ethernet-cable" aria-hidden="true"></span>
</button>
<ul class="dropdown-menu dropdown-menu-right">
<li><a href="{% url 'dcim:frontport_connect' termination_a_id=record.pk termination_b_type='interface' %}?return_url={% url 'dcim:device_frontports' pk=object.pk %}">Interface</a></li>
<li><a href="{% url 'dcim:frontport_connect' termination_a_id=record.pk termination_b_type='console-server-port' %}?return_url={% url 'dcim:device_frontports' pk=object.pk %}">Console Server Port</a></li>
<li><a href="{% url 'dcim:frontport_connect' termination_a_id=record.pk termination_b_type='console-port' %}?return_url={% url 'dcim:device_frontports' pk=object.pk %}">Console Port</a></li>
<li><a href="{% url 'dcim:frontport_connect' termination_a_id=record.pk termination_b_type='front-port' %}?return_url={% url 'dcim:device_frontports' pk=object.pk %}">Front Port</a></li>
<li><a href="{% url 'dcim:frontport_connect' termination_a_id=record.pk termination_b_type='rear-port' %}?return_url={% url 'dcim:device_frontports' pk=object.pk %}">Rear Port</a></li>
<li><a href="{% url 'dcim:frontport_connect' termination_a_id=record.pk termination_b_type='circuit-termination' %}?return_url={% url 'dcim:device_frontports' pk=object.pk %}">Circuit Termination</a></li>
</ul>
</span>
{% endif %}
"""
REARPORT_BUTTONS = """
{% if record.cable %}
<a href="{% url 'dcim:rearport_trace' pk=record.pk %}" class="btn btn-primary btn-xs" title="Trace"><i class="mdi mdi-transit-connection-variant"></i></a>
{% include 'dcim/inc/cable_toggle_buttons.html' with cable=record.cable %}
{% elif perms.dcim.add_cable %}
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-transit-connection-variant" aria-hidden="true"></i></a>
<a href="#" class="btn btn-default btn-xs disabled"><i class="mdi mdi-lan-connect" aria-hidden="true"></i></a>
<span class="dropdown">
<button type="button" class="btn btn-success btn-xs dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<span class="mdi mdi-ethernet-cable" aria-hidden="true"></span>
</button>
<ul class="dropdown-menu dropdown-menu-right">
<li><a href="{% url 'dcim:rearport_connect' termination_a_id=record.pk termination_b_type='interface' %}?return_url={% url 'dcim:device_rearports' pk=object.pk %}">Interface</a></li>
<li><a href="{% url 'dcim:rearport_connect' termination_a_id=record.pk termination_b_type='front-port' %}?return_url={% url 'dcim:device_rearports' pk=object.pk %}">Front Port</a></li>
<li><a href="{% url 'dcim:rearport_connect' termination_a_id=record.pk termination_b_type='rear-port' %}?return_url={% url 'dcim:device_rearports' pk=object.pk %}">Rear Port</a></li>
<li><a href="{% url 'dcim:rearport_connect' termination_a_id=record.pk termination_b_type='circuit-termination' %}?return_url={% url 'dcim:device_rearports' pk=object.pk %}">Circuit Termination</a></li>
</ul>
</span>
{% endif %}
"""
DEVICEBAY_BUTTONS = """
{% if perms.dcim.change_devicebay %}
{% if record.installed_device %}
<a href="{% url 'dcim:devicebay_depopulate' pk=record.pk %}?return_url={% url 'dcim:device_devicebays' pk=object.pk %}" class="btn btn-danger btn-xs">
<i class="mdi mdi-minus-thick" aria-hidden="true" title="Remove device"></i>
</a>
{% else %}
<a href="{% url 'dcim:devicebay_populate' pk=record.pk %}?return_url={% url 'dcim:device_devicebays' pk=object.pk %}" class="btn btn-success btn-xs">
<i class="mdi mdi-plus-thick" aria-hidden="true" title="Install device"></i>
</a>
{% endif %}
{% endif %}
"""
| python | 14,692 |
import math
from datetime import datetime
import pandas as pd
from settings import IVarType
from util.langUtil import try_divide
class ClassicSupportFinder:
ARGS_DICT = {
'distinguishing_constant': {
'default': 10,
'range': [1, 30],
'step': 0.05,
            'comment': 'Factor distinguishing between different bundles. The greater the number, '
                       'the more supports are bundled together. Adjacent distance for bundling '
                       'is directly equal to d_c * stddev (variability), e.g. stddev(200) (+ a flat base of 1 pip). '
                       'Acts as a multiplier to stddev. If the stddev type is flat, the distinguishing amount is '
                       'd_c * 100 pips. '
                       'UPDATE: d_c * pips only.',
'type': IVarType.CONTINUOUS,
},
'decay_constant': {
'default': 0.95,
'range': [0.1, 1],
'step': 0.01,
'type': IVarType.CONTINUOUS,
},
'width_coefficient': {
'default': 1, # a.k.a width decay
'range': [0, 2],
'step': 0.01,
            'comment': 'Strength = width_coefficient * base + 1. At 0, Strength = 1 at all times. '
                       'At greater numbers, base width greatly increases strength.',
'type': IVarType.CONTINUOUS,
},
'clumping_coefficient': { # Done
'default': 1,
'range': [0.2, 2],
'step': 0.01,
            'comment': 'Affects strength addition: (X+Y)/c. c=1 is default addition. The greater '
                       'the number, the less strength emphasis is on having multiple supports. '
                       'Smaller numbers (<1) enhance the importance of having multiple supports. '
                       'Sum(X_n) = (X_1+...+X_N)/c^(N-1). Sum(X) = X/c^0 = X, as expected.',
'type': IVarType.CONTINUOUS,
},
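        # Example (from the formula above): three supports of strength 4 each give
        # (4 + 4 + 4) / c^2, i.e. 12 with c = 1 but only 3 with c = 2.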
'variability_period': {
'default': 500,
'range': [200, 700],
'step': 50,
            'comment': 'Used in determining bundling with distinguishing_constant. +-Stddev lines are formed '
                       'from the stddev(variability_period) calculation.',
'type': IVarType.CONTINUOUS,
},
'symmetry_coefficient': {
'default': 0,
'range': [0, 1],
'step': 0.1,
'type': IVarType.CONTINUOUS,
            'comment': 'The greater this coefficient, the more it demands that the left and right bases '
                       'of a support be symmetrical. At 1, both sides can only be as wide '
                       'as their shortest side. At 0, both sides are as wide as their longest '
                       'side. Formula: min(min(l, r) * 1/c, max(l, r)) + max(l, r), where the left '
                       'term represents the shorter side, compensated, and the right term is the '
                       'longer side. If c = 0, min(min, max) is assumed to be max(l, r).',
},
'max_base': {
'default': 12,
'range': [5, 50],
'step': 1,
'type': IVarType.DISCRETE,
},
'min_base': {
'default': 2,
'range': [1, 5],
'step': 1,
'type': IVarType.DISCRETE,
},
'delta_constant': {
'default': 3,
'range': [1, 10],
'step': 1,
'type': IVarType.CONTINUOUS,
            'comment': 'This is the main part of the algorithm that sets it apart from '
                       'the strictly inc./dec. peak algorithm. It allows for a \'give\' of '
                       'delta before considering it an increase or decrease. Peaks are defined not by '
                       'strictly decreasing numbers on both sides but instead by a looser requirement of '
                       'delta-decreasing numbers on both sides. If the values on the side increase, but '
                       'within delta, it does not count as breaking the peak. '
                       'delta_constant is in units of pips.'
},
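        # Example: with delta_constant = 3, delta_value = 3 * 0.0001 = 0.0003, so a close-to-close
        # move of 0.0002 is recorded as 0 (within delta) and a move of 0.0005 as +1/-1.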
'smoothing_period': {
'default': 6,
'range': [3, 20],
'step': 1,
'type': IVarType.DISCRETE,
            'comment': 'Conducting the same algorithm on a smoothed surface may generate supports missed '
                       'when operating on the candlestick data. These supports, from here on called '
                       'smooth supports, will corroborate the supports. Only when these supports cannot be '
                       'bundled with any existing bundles will they form their own bundle. Note: peaks and troughs '
                       'are detected with delta=0 (strict peaks/troughs). min_base will be of the same size as '
                       'the normal min_base.'
        },
# Unused
# 'value_type': { # Not used at the moment
# 'default': 'close', # index 0
# 'idx ': 0,
# 'range': ['close', 'open', 'high_low', 'average'],
# 'type': IVarType.ENUM
# },
# 'variability_type': {
# 'default': 'flat',
# 'idx': 1,
# 'range': ['stddev', 'flat'],
# 'type': IVarType.ENUM
# }
}
OTHER_ARGS_DICT = {
'lookback_period': {
'default': 20,
},
'strength_cutoff': {
'default': 0.01, # strength = log(base)
'range': [0.001, 0.1],
'step': 0.001,
'type': IVarType.CONTINUOUS,
},
'date_cutoff': {
            'default': 25,
'range': [],
'step': 0,
'type': IVarType.CONTINUOUS,
},
}
# Constants
PEAK, TROUGH = 1, -1
# Other args
PREPARE_PERIOD = 0
def __init__(self, ivar=None):
# == Main Args ==
if ivar is None:
ivar = self.ARGS_DICT
self.time = None
self.started = None
self.df = None
if ivar:
self.ivar = ivar
else:
self.ivar = self.ARGS_DICT
# ARGS_DICT
self.distinguishing_constant = ivar['distinguishing_constant']['default']
self.decay_constant = ivar['decay_constant']['default']
self.variability_period = ivar['variability_period']['default']
self.symmetry_coefficient = ivar['symmetry_coefficient']['default']
self.max_base = ivar['max_base']['default']
self.min_base = ivar['min_base']['default']
self.delta_constant = ivar['delta_constant']['default']
self.delta_value = self.delta_constant * 0.0001
self.width_coefficient = ivar['width_coefficient']['default']
self.clumping_strength = ivar['clumping_coefficient']['default']
        # self.value_type = ivar['value_type']['default']
        # self.variability_type = ivar['variability_type']['default']
        self.value_type = 'close'  # fallback while 'value_type' is not exposed in ARGS_DICT (used by get_value)
self.ivar_check()
# OTHER ARGS
self.lookback_period = self.OTHER_ARGS_DICT['lookback_period']['default']
self.strength_cutoff = self.OTHER_ARGS_DICT['strength_cutoff']['default']
self.date_cutoff = self.OTHER_ARGS_DICT['date_cutoff']['default']
# Constants
self.pip = 0.0001
self.min_left = self.min_base // 2
# == Variables ==
# Variable arrays
self.decay = math.pow(math.e, - self.decay_constant)
self.bundles = [] # supports build up into bundles
self.supports = [] # handles to the supports themselves
self.delta_data = [] # -1: delta-descending, 0: within delta, 1: delta-ascending
self.accum_df = pd.DataFrame()
self.delta_df = pd.DataFrame()
# Tracking variables
self.last_peak, self.last_trough, self.new_peak, self.new_trough = 0, 0, 0, 0
self.peak, self.trough, self.has_new = 0, 0, False
self.last_lookback, self.last_support, self.last_delta, self.delta_flipped = 0, None, 0, False
self.idx = 0
# Collecting data across time
        self.n_supports = []
        self.avg_strength = []
# Indicators
self.stdev = []
# == Testing ==
self.test_mode = None
def ivar_check(self):
"""Ensures the IVar variables are 1) within range and 2) in the correct format."""
for key in self.ivar.keys():
arg = self.ivar[key]
def reset(self, ivar):
self.__init__(ivar)
def start(self, meta_or_param, pre_data: pd.DataFrame, test_mode=False):
"""?Start"""
self.reset(self.ivar) # External codes should reset it instead.
self.test_mode = test_mode
# == Data Meta ==
pass
# == Preparation ==
self.df = pd.DataFrame()
self.idx += len(pre_data) - 1
# == Statistical Data ==
self.n_supports = []
self.avg_strength = []
# == Status ==
self.started = True
self.time = datetime.now()
# Setup consequences of pre_data
for i in range(max(0, len(pre_data) - self.lookback_period), len(pre_data)):
self.pre_next(pre_data[i:i + 1]) # df.Close, Open, High, Low
self.n_supports.append(0)
self.avg_strength.append(0)
        self.delta_df.index.name = 'date'
def support_find(self, data):
"""Find supports in data w.r.t current (latest) index"""
for i in range(len(data) - self.date_cutoff, len(data)):
pass
def set_pip_value(self, pip):
self.pip = pip
# ==== Algo ====
def next(self, candlestick):
# Next
# self.df = self.df.append(candlestick)
self.df = pd.concat([self.df, candlestick])
self.idx += 1
# Note: This algorithm is index agnostic
# self.supports = [] # temporary
_max, _min = 0, math.inf
if len(self.df) < 2:
return
self.build_indicators()
# ===== Algorithm ======
# (1) Compare old[-1] and new candle
diff = self.df.Close[-2] - self.df.Close[-1]
self.delta_flipped = False
if abs(diff) < self.delta_value:
self.delta_data.append(0)
else:
if diff > 0: # Past candle is higher than latest candle
delta_val = -1
else:
delta_val = 1
self.delta_data.append(delta_val)
# 1 to -1 or -1 to 1. 0s break the chain
if self.last_delta != 0:
self.delta_flipped = (self.last_delta != delta_val)
self.last_delta = delta_val
# Update delta df
self.delta_df = pd.concat([self.delta_df, pd.DataFrame({
'delta': self.delta_data[-1]
}, index=[self.df.index[-1]])])
        if len(self.accum_df) > 0:
            self.accum_df = pd.concat([self.accum_df, pd.DataFrame({
                'delta': self.delta_data[-1] + self.accum_df.delta[-1]
            }, index=[self.df.index[-1]])])
        else:
            self.accum_df = pd.concat([self.accum_df, pd.DataFrame({
                'delta': self.delta_data[-1]
            }, index=[self.df.index[-1]])])
# (2) Get next peak/trough:
if self.trough == self.peak: # Find any peak/trough
if self.last_delta == 0:
pass # ignore and continue
# Do not create support, but create left base first
elif self.last_delta == 1:
self.trough = self.idx - 1
elif self.last_delta == -1:
self.peak = self.idx - 1
elif self.trough > self.peak: # Find new peak
if self.delta_flipped: # Found!
# 'default' peak properties
self.peak = self.idx - 1
left_base = self.peak - self.trough
start = self.trough
end = self.idx
height = self.df.Close[self.peak]
# Check if supports (previous and current) have min_base
if left_base < self.min_base // 2: # new left base = old right base
# Destroy left support
if self.has_new:
self.delete_support(self.supports[-1])
# Do not create new support, past support cannot be extended also
self.has_new = False
else: # left base > min_base // 2, OK
# Try to find true peak (a.k.a delta=0 peak)
# todo: 1) check if sorting works 2) check if df.index.get_loc works
peaks = self.df[self.trough:self.peak + 1][self.df.Close >= height].sort_values(by=['Close'],
ascending=False)
# If no alt. peaks, loop will terminate at df.Close == height
for i, peak in peaks.iterrows():
# Check if alt. left_base is of minimum length,
_peak = self.df.index.get_loc(i)
_left_base = _peak - self.trough
if _left_base >= self.min_base // 2: # Add as new peak
# Adjust previous support's base
# self.update_support(self.supports[-1], 'end', _peak) # no need to. auto extended!
# Register peak
height = peak['Close']
self.peak = _peak
self.create_support(self.peak, start, end, height, self.PEAK)
self.has_new = True
break
else: # otherwise continue
continue
else:
if self.has_new:
if self.try_extend(self.supports[-1]):
pass # if extension (to the right) successful, do nothing
else:
# Reset status to 'neutral'
self.has_new = False
# self.trough = self.peak = self.idx # no need to reset completely
else: # No older support to extend. Old trough and peak cannot be further than min_base/2 away
# Last support was trough. Only reset trough.
self.trough = max(self.trough, self.idx - self.min_left) # reset
elif self.peak > self.trough: # Find new trough
if self.delta_flipped:
self.trough = self.idx - 1
left_base = self.trough - self.peak
start = self.peak
end = self.idx
depth = self.df.Close[self.trough]
# Check if supports have min_base
if left_base < self.min_base // 2:
# Destroy left support
if self.has_new:
self.delete_support(self.supports[-1])
# Past support cannot be extended
self.has_new = False
else:
# Try to find true trough
troughs = self.df[self.peak:self.trough + 1][self.df.Close <= depth].sort_values(by=['Close'],
ascending=True)
for i, trough in troughs.iterrows():
# Check if alt. trough has min_base
_trough = self.df.index.get_loc(i)
_left_base = _trough - self.peak
if _left_base >= self.min_base // 2:
# Adjust previous support's base
# self.update_support(self.supports[-1], 'end', _trough)
# Register trough
depth = trough['Close']
self.trough = _trough
self.create_support(self.trough, start, end, depth, self.TROUGH)
self.has_new = True
break
else:
continue
else:
if self.has_new:
if self.try_extend(self.supports[-1]):
pass
else:
self.has_new = False
else: # Reset peak only (Searching for trough)
self.peak = max(self.peak, self.idx - self.min_left)
# ===== Bundling =====
# Bundling is automatic when creating supports
# Decay bundles
self.decay_all()
# ===== Stats =====
self.n_supports.append(len(self.bundles))
# self.avg_strength.append(try_mean([bundle['strength'] for bundle in self.bundles]))
# ===== Return function =====
# None in this case
# print(self.bundles)
def pre_next(self, candlestick):
        self.df = pd.concat([self.df, candlestick])
        # Pre_data supports will be ignored! If that is not desired, do not include pre_data
        self.delta_data.append(0)
        self.delta_df = pd.concat([self.delta_df, pd.DataFrame({
            'delta': 0
        }, index=[self.df.index[-1]])])
        self.accum_df = pd.concat([self.accum_df, pd.DataFrame({
            'delta': 0
        }, index=[self.df.index[-1]])])
# ==============
def get_supports(self):
return self.bundles
def get_value(self, idx, peak_type=TROUGH):
if self.value_type == 'close':
return self.df.Close[idx]
if self.value_type == 'high_low':
if peak_type == self.TROUGH:
return self.df.Low[idx]
elif peak_type == self.PEAK:
return self.df.High[idx]
if self.value_type == 'open':
return self.df.Open[idx]
if self.value_type == 'average':
return self.df.Close[idx] # ?
return None
def get_sort_height(self, idx, peak_type=TROUGH):
"""Sorts based on value type"""
if self.value_type == 'close':
return self.df.Close[idx]
if self.value_type == 'high_low':
if peak_type == self.TROUGH:
return self.df.Low[idx]
elif peak_type == self.PEAK:
return self.df.High[idx]
if self.value_type == 'open':
return self.df.Open[idx]
if self.value_type == 'average':
return self.df.Close[idx] # ?
pass
def get_resistances(self):
"""Only get support ceilings"""
last = self.df.Close[-1]
_bundles = []
for bundle in self.bundles:
if bundle['height'] > last:
_bundles.append(bundle)
return _bundles
def get_resistance_supports(self):
"""Only get support floors"""
last = self.df.Close[-1]
_bundles = []
for bundle in self.bundles:
if bundle['height'] < last:
_bundles.append(bundle)
return _bundles
def get_instructions(self):
# Lines should get lighter the weaker they are
# Data should be pd.DataFrame format with index and 'height'/value
data = pd.DataFrame(index=[self.get_idx_date(bundle['peak']) for bundle in self.bundles], data={
'strength': [bundle['strength'] for bundle in self.bundles],
'height': [bundle['height'] for bundle in self.bundles],
'peak': [bundle['peak'] for bundle in self.bundles],
})
# data = pd.DataFrame(index=[[self.df.index.get_loc(bundle['peak']) for bundle in self.bundles]], data={
# 'strength': [[bundle['strength'] for bundle in self.bundles]],
# 'height': [[bundle['height'] for bundle in self.bundles]],
# })
return [{
'index': 0,
'data': data,
'type': 'support',
'colour': 'black',
},
# {
# 'index': 0,
# 'data': smooth_data,
# 'type': 'support',
# 'colour': 'red',
# },
{
'index': 1,
'data': self.delta_df.copy(),
'type': 'line',
'colour': 'black',
}, {
'index': 2,
'data': self.accum_df.copy(),
'type': 'line',
'colour': 'black',
}]
def build_indicators(self):
# self.stdev = talib.STDDEV(self.df, self.variability_period)
pass
# Util functions
def bundle_add(self, _bundle, support):
for bundle in self.bundles:
if bundle == _bundle:
bundle['supports'].append(support)
self.calculate_bundle(bundle)
def calc_strength(self, support):
"""Takes width and time decay into account. Recalculates the current strength value of a support."""
strength = self.calc_raw_strength(support)
dist = self.idx - support['peak']
support['strength'] = math.pow(self.decay_constant, dist) * strength
return support['strength']
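    # Example: with decay_constant = 0.95, a support whose peak is 10 candles old keeps
    # 0.95 ** 10, i.e. about 0.60, of its raw strength.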
def calc_raw_strength(self, support):
left = support['end'] - support['peak']
right = support['peak'] - support['start']
# Symmetry considerations
base = min(min(left, right) * try_divide(1, self.symmetry_coefficient), max(left, right)) \
+ max(left, right)
if math.isnan(base): # Occurs only on 0 * inf
base = min(left, right)
# Max base consideration
base = min(base, self.max_base)
# Width contribution consideration
return base * self.width_coefficient + 1
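    # Example: left = 2, right = 6, symmetry_coefficient = 0.5, max_base = 12, width_coefficient = 1
    # gives base = min(2 * (1 / 0.5), 6) + 6 = min(4, 6) + 6 = 10 and raw strength = 10 * 1 + 1 = 11.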
def calculate_bundle(self, bundle):
strength = 0
peak = 0 # (position)
height = 0
for support in bundle['supports']:
# strength += self.calc_strength(support, idx)
strength += support['strength']
# peak += support['strength'] * support['peak']
height += support['strength'] * support['height']
peak = support['peak']
bundle['strength'] = try_divide(strength, math.pow(self.clumping_strength, len(bundle['supports']) - 1))
# bundle['peak'] = try_divide(bundle['peak'], len(bundle['supports']) * strength)
bundle['peak'] = peak # Last added peak
bundle['height'] = try_divide(height, len(bundle['supports']) * strength)
return strength
def combine_bundles(self):
"""Use closeness/2 metric. Combine from top to bottom."""
pass
def create_bundle(self, support):
"""Create new bundle around support."""
bundle = {
'strength': 0,
'peak': 0,
'height': 0,
'supports': [support]
}
self.bundles.append(bundle)
# print(F'Creating {bundle}')
return bundle
def create_support(self, peak, start, end, height, type):
"""Create support within bounds end and start, at peak, with value of height.
Types are TROUGH or PEAK. 'open' is whether the support is available for
base extension (to increase its strength). Then, add it into closest bundle if possible.
Otherwise, it becomes a bundle of its own."""
support = {
'peak': peak,
'start': start,
'end': end,
'height': height,
'type': type,
'open': True,
}
support.update({
'strength': self.calc_raw_strength(support)
})
# Add into some bundle
added = False
for bundle in self.bundles:
if self.within_bundle(bundle, support):
self.bundle_add(bundle, support)
# print(F'Creating {support} in {bundle}')
added = True
break
# Make new bundle
if not added:
bundle = self.create_bundle(support)
# print(F'Creating {support} in new {bundle}')
self.calculate_bundle(bundle)
self.supports.append(support)
return support
def decay_all(self):
for bundle in self.bundles:
self.decay_bundle(bundle)
self.delete_decayed()
def decay_bundle(self, bundle):
for support in bundle['supports']:
self.decay_support(support)
self.calculate_bundle(bundle)
    def decay_by(self, strength, length):
        return strength * math.pow(self.decay_constant, length)
def decay_support(self, support):
support['strength'] = support['strength'] * self.decay_constant
    def delete_support(self, _support):
        # Iterate over a copy so that a bundle can be removed safely during iteration
        for bundle in list(self.bundles):
            if _support in bundle['supports']:
                # print(F'Deleting {_support} from {bundle}')
                bundle['supports'].remove(_support)
                # If bundle has no supports, remove it
                if len(bundle['supports']) == 0:
                    self.bundles.remove(bundle)
                self.supports.remove(_support)
                break
    def delete_decayed(self):
        # Iterate over a copy; removing items from the list being iterated would skip elements
        for bundle in list(self.bundles):
            if bundle['strength'] < self.strength_cutoff:
                self.bundles.remove(bundle)
def get_bundle(self, support):
for bundle in self.bundles:
if support in bundle['supports']:
return bundle
return None
def get_idx_date(self, idx):
if idx < 0 or idx > self.idx:
idx = 0
return self.df.index[idx]
def try_extend(self, support):
"""Extend length of peak. This affects its strength. Upon extension, recalculate
decay effects."""
if support['end'] - support['start'] > self.max_base: # base too long, reset
return False
elif support['type'] == self.PEAK and support['height'] < self.df.High[self.idx]: # new base too high
return False
elif support['type'] == self.TROUGH and support['height'] > self.df.Low[self.idx]: # new base too low
return False
# Calculate new strength
support['end'] += 1
# support['strength'] += 1 * math.pow(self.decay, self.idx - support['peak']) * self.width_coefficient
support['strength'] = self.calc_strength(support)
# Recalculate bundle strength
self.calculate_bundle(self.get_bundle(support))
return True
def update_support(self, support, arg, val):
support[arg] = val
self.calculate_bundle(self.get_bundle(support))
def within_bundle(self, bundle, support):
if abs(bundle['height'] - support['height']) < self.distinguishing_constant * self.pip:
return True
return False
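# Usage sketch (illustrative; assumes an OHLC DataFrame `df` with Close/Open/High/Low columns and a
# DatetimeIndex, as used throughout this class):
#
#     finder = ClassicSupportFinder()
#     finder.start(None, pre_data=df.iloc[:500])
#     for i in range(500, len(df)):
#         finder.next(df.iloc[i:i + 1])
#     bundles = finder.get_supports()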
| python | 26,961 |
#
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
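# Typical client construction (sketch; host, port and transport/protocol choices are illustrative
# and not part of this generated module):
#
#     from thrift.transport import TSocket
#     from thrift.protocol import TBinaryProtocol
#
#     socket = TSocket.TSocket('localhost', 9090)
#     transport = TTransport.TBufferedTransport(socket)
#     protocol = TBinaryProtocol.TBinaryProtocol(transport)
#     client = Client(protocol)
#     transport.open()
#     print(client.getAPIVersion())
#     transport.close()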
class Iface(object):
def getAPIVersion(self):
pass
def createGroup(self, authzToken, groupModel):
"""
Parameters:
- authzToken
- groupModel
"""
pass
def updateGroup(self, authzToken, groupModel):
"""
Parameters:
- authzToken
- groupModel
"""
pass
def deleteGroup(self, authzToken, groupId, ownerId):
"""
Parameters:
- authzToken
- groupId
- ownerId
"""
pass
def getGroup(self, authzToken, groupId):
"""
Parameters:
- authzToken
- groupId
"""
pass
def getGroups(self, authzToken):
"""
Parameters:
- authzToken
"""
pass
def getAllGroupsUserBelongs(self, authzToken, userName):
"""
Parameters:
- authzToken
- userName
"""
pass
def addUsersToGroup(self, authzToken, userIds, groupId):
"""
Parameters:
- authzToken
- userIds
- groupId
"""
pass
def removeUsersFromGroup(self, authzToken, userIds, groupId):
"""
Parameters:
- authzToken
- userIds
- groupId
"""
pass
def transferGroupOwnership(self, authzToken, groupId, newOwnerId):
"""
Parameters:
- authzToken
- groupId
- newOwnerId
"""
pass
def addGroupAdmins(self, authzToken, groupId, adminIds):
"""
Parameters:
- authzToken
- groupId
- adminIds
"""
pass
def removeGroupAdmins(self, authzToken, groupId, adminIds):
"""
Parameters:
- authzToken
- groupId
- adminIds
"""
pass
def hasAdminAccess(self, authzToken, groupId, adminId):
"""
Parameters:
- authzToken
- groupId
- adminId
"""
pass
def hasOwnerAccess(self, authzToken, groupId, ownerId):
"""
Parameters:
- authzToken
- groupId
- ownerId
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def getAPIVersion(self):
self.send_getAPIVersion()
return self.recv_getAPIVersion()
def send_getAPIVersion(self):
self._oprot.writeMessageBegin('getAPIVersion', TMessageType.CALL, self._seqid)
args = getAPIVersion_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getAPIVersion(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getAPIVersion_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
raise TApplicationException(TApplicationException.MISSING_RESULT, "getAPIVersion failed: unknown result")
def createGroup(self, authzToken, groupModel):
"""
Parameters:
- authzToken
- groupModel
"""
self.send_createGroup(authzToken, groupModel)
return self.recv_createGroup()
def send_createGroup(self, authzToken, groupModel):
self._oprot.writeMessageBegin('createGroup', TMessageType.CALL, self._seqid)
args = createGroup_args()
args.authzToken = authzToken
args.groupModel = groupModel
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_createGroup(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = createGroup_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "createGroup failed: unknown result")
def updateGroup(self, authzToken, groupModel):
"""
Parameters:
- authzToken
- groupModel
"""
self.send_updateGroup(authzToken, groupModel)
return self.recv_updateGroup()
def send_updateGroup(self, authzToken, groupModel):
self._oprot.writeMessageBegin('updateGroup', TMessageType.CALL, self._seqid)
args = updateGroup_args()
args.authzToken = authzToken
args.groupModel = groupModel
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_updateGroup(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = updateGroup_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "updateGroup failed: unknown result")
def deleteGroup(self, authzToken, groupId, ownerId):
"""
Parameters:
- authzToken
- groupId
- ownerId
"""
self.send_deleteGroup(authzToken, groupId, ownerId)
return self.recv_deleteGroup()
def send_deleteGroup(self, authzToken, groupId, ownerId):
self._oprot.writeMessageBegin('deleteGroup', TMessageType.CALL, self._seqid)
args = deleteGroup_args()
args.authzToken = authzToken
args.groupId = groupId
args.ownerId = ownerId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteGroup(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = deleteGroup_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteGroup failed: unknown result")
def getGroup(self, authzToken, groupId):
"""
Parameters:
- authzToken
- groupId
"""
self.send_getGroup(authzToken, groupId)
return self.recv_getGroup()
def send_getGroup(self, authzToken, groupId):
self._oprot.writeMessageBegin('getGroup', TMessageType.CALL, self._seqid)
args = getGroup_args()
args.authzToken = authzToken
args.groupId = groupId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getGroup(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getGroup_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "getGroup failed: unknown result")
def getGroups(self, authzToken):
"""
Parameters:
- authzToken
"""
self.send_getGroups(authzToken)
return self.recv_getGroups()
def send_getGroups(self, authzToken):
self._oprot.writeMessageBegin('getGroups', TMessageType.CALL, self._seqid)
args = getGroups_args()
args.authzToken = authzToken
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getGroups(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getGroups_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "getGroups failed: unknown result")
def getAllGroupsUserBelongs(self, authzToken, userName):
"""
Parameters:
- authzToken
- userName
"""
self.send_getAllGroupsUserBelongs(authzToken, userName)
return self.recv_getAllGroupsUserBelongs()
def send_getAllGroupsUserBelongs(self, authzToken, userName):
self._oprot.writeMessageBegin('getAllGroupsUserBelongs', TMessageType.CALL, self._seqid)
args = getAllGroupsUserBelongs_args()
args.authzToken = authzToken
args.userName = userName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getAllGroupsUserBelongs(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getAllGroupsUserBelongs_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "getAllGroupsUserBelongs failed: unknown result")
def addUsersToGroup(self, authzToken, userIds, groupId):
"""
Parameters:
- authzToken
- userIds
- groupId
"""
self.send_addUsersToGroup(authzToken, userIds, groupId)
return self.recv_addUsersToGroup()
def send_addUsersToGroup(self, authzToken, userIds, groupId):
self._oprot.writeMessageBegin('addUsersToGroup', TMessageType.CALL, self._seqid)
args = addUsersToGroup_args()
args.authzToken = authzToken
args.userIds = userIds
args.groupId = groupId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addUsersToGroup(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = addUsersToGroup_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "addUsersToGroup failed: unknown result")
def removeUsersFromGroup(self, authzToken, userIds, groupId):
"""
Parameters:
- authzToken
- userIds
- groupId
"""
self.send_removeUsersFromGroup(authzToken, userIds, groupId)
return self.recv_removeUsersFromGroup()
def send_removeUsersFromGroup(self, authzToken, userIds, groupId):
self._oprot.writeMessageBegin('removeUsersFromGroup', TMessageType.CALL, self._seqid)
args = removeUsersFromGroup_args()
args.authzToken = authzToken
args.userIds = userIds
args.groupId = groupId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_removeUsersFromGroup(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = removeUsersFromGroup_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "removeUsersFromGroup failed: unknown result")
def transferGroupOwnership(self, authzToken, groupId, newOwnerId):
"""
Parameters:
- authzToken
- groupId
- newOwnerId
"""
self.send_transferGroupOwnership(authzToken, groupId, newOwnerId)
return self.recv_transferGroupOwnership()
def send_transferGroupOwnership(self, authzToken, groupId, newOwnerId):
self._oprot.writeMessageBegin('transferGroupOwnership', TMessageType.CALL, self._seqid)
args = transferGroupOwnership_args()
args.authzToken = authzToken
args.groupId = groupId
args.newOwnerId = newOwnerId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_transferGroupOwnership(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = transferGroupOwnership_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "transferGroupOwnership failed: unknown result")
def addGroupAdmins(self, authzToken, groupId, adminIds):
"""
Parameters:
- authzToken
- groupId
- adminIds
"""
self.send_addGroupAdmins(authzToken, groupId, adminIds)
return self.recv_addGroupAdmins()
def send_addGroupAdmins(self, authzToken, groupId, adminIds):
self._oprot.writeMessageBegin('addGroupAdmins', TMessageType.CALL, self._seqid)
args = addGroupAdmins_args()
args.authzToken = authzToken
args.groupId = groupId
args.adminIds = adminIds
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addGroupAdmins(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = addGroupAdmins_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "addGroupAdmins failed: unknown result")
def removeGroupAdmins(self, authzToken, groupId, adminIds):
"""
Parameters:
- authzToken
- groupId
- adminIds
"""
self.send_removeGroupAdmins(authzToken, groupId, adminIds)
return self.recv_removeGroupAdmins()
def send_removeGroupAdmins(self, authzToken, groupId, adminIds):
self._oprot.writeMessageBegin('removeGroupAdmins', TMessageType.CALL, self._seqid)
args = removeGroupAdmins_args()
args.authzToken = authzToken
args.groupId = groupId
args.adminIds = adminIds
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_removeGroupAdmins(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = removeGroupAdmins_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "removeGroupAdmins failed: unknown result")
def hasAdminAccess(self, authzToken, groupId, adminId):
"""
Parameters:
- authzToken
- groupId
- adminId
"""
self.send_hasAdminAccess(authzToken, groupId, adminId)
return self.recv_hasAdminAccess()
def send_hasAdminAccess(self, authzToken, groupId, adminId):
self._oprot.writeMessageBegin('hasAdminAccess', TMessageType.CALL, self._seqid)
args = hasAdminAccess_args()
args.authzToken = authzToken
args.groupId = groupId
args.adminId = adminId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_hasAdminAccess(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = hasAdminAccess_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "hasAdminAccess failed: unknown result")
def hasOwnerAccess(self, authzToken, groupId, ownerId):
"""
Parameters:
- authzToken
- groupId
- ownerId
"""
self.send_hasOwnerAccess(authzToken, groupId, ownerId)
return self.recv_hasOwnerAccess()
def send_hasOwnerAccess(self, authzToken, groupId, ownerId):
self._oprot.writeMessageBegin('hasOwnerAccess', TMessageType.CALL, self._seqid)
args = hasOwnerAccess_args()
args.authzToken = authzToken
args.groupId = groupId
args.ownerId = ownerId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_hasOwnerAccess(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = hasOwnerAccess_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.gse is not None:
raise result.gse
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "hasOwnerAccess failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["getAPIVersion"] = Processor.process_getAPIVersion
self._processMap["createGroup"] = Processor.process_createGroup
self._processMap["updateGroup"] = Processor.process_updateGroup
self._processMap["deleteGroup"] = Processor.process_deleteGroup
self._processMap["getGroup"] = Processor.process_getGroup
self._processMap["getGroups"] = Processor.process_getGroups
self._processMap["getAllGroupsUserBelongs"] = Processor.process_getAllGroupsUserBelongs
self._processMap["addUsersToGroup"] = Processor.process_addUsersToGroup
self._processMap["removeUsersFromGroup"] = Processor.process_removeUsersFromGroup
self._processMap["transferGroupOwnership"] = Processor.process_transferGroupOwnership
self._processMap["addGroupAdmins"] = Processor.process_addGroupAdmins
self._processMap["removeGroupAdmins"] = Processor.process_removeGroupAdmins
self._processMap["hasAdminAccess"] = Processor.process_hasAdminAccess
self._processMap["hasOwnerAccess"] = Processor.process_hasOwnerAccess
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_getAPIVersion(self, seqid, iprot, oprot):
args = getAPIVersion_args()
args.read(iprot)
iprot.readMessageEnd()
result = getAPIVersion_result()
try:
result.success = self._handler.getAPIVersion()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getAPIVersion", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_createGroup(self, seqid, iprot, oprot):
args = createGroup_args()
args.read(iprot)
iprot.readMessageEnd()
result = createGroup_result()
try:
result.success = self._handler.createGroup(args.authzToken, args.groupModel)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("createGroup", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_updateGroup(self, seqid, iprot, oprot):
args = updateGroup_args()
args.read(iprot)
iprot.readMessageEnd()
result = updateGroup_result()
try:
result.success = self._handler.updateGroup(args.authzToken, args.groupModel)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("updateGroup", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteGroup(self, seqid, iprot, oprot):
args = deleteGroup_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteGroup_result()
try:
result.success = self._handler.deleteGroup(args.authzToken, args.groupId, args.ownerId)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("deleteGroup", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getGroup(self, seqid, iprot, oprot):
args = getGroup_args()
args.read(iprot)
iprot.readMessageEnd()
result = getGroup_result()
try:
result.success = self._handler.getGroup(args.authzToken, args.groupId)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getGroup", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getGroups(self, seqid, iprot, oprot):
args = getGroups_args()
args.read(iprot)
iprot.readMessageEnd()
result = getGroups_result()
try:
result.success = self._handler.getGroups(args.authzToken)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getGroups", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getAllGroupsUserBelongs(self, seqid, iprot, oprot):
args = getAllGroupsUserBelongs_args()
args.read(iprot)
iprot.readMessageEnd()
result = getAllGroupsUserBelongs_result()
try:
result.success = self._handler.getAllGroupsUserBelongs(args.authzToken, args.userName)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getAllGroupsUserBelongs", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_addUsersToGroup(self, seqid, iprot, oprot):
args = addUsersToGroup_args()
args.read(iprot)
iprot.readMessageEnd()
result = addUsersToGroup_result()
try:
result.success = self._handler.addUsersToGroup(args.authzToken, args.userIds, args.groupId)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("addUsersToGroup", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_removeUsersFromGroup(self, seqid, iprot, oprot):
args = removeUsersFromGroup_args()
args.read(iprot)
iprot.readMessageEnd()
result = removeUsersFromGroup_result()
try:
result.success = self._handler.removeUsersFromGroup(args.authzToken, args.userIds, args.groupId)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("removeUsersFromGroup", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_transferGroupOwnership(self, seqid, iprot, oprot):
args = transferGroupOwnership_args()
args.read(iprot)
iprot.readMessageEnd()
result = transferGroupOwnership_result()
try:
result.success = self._handler.transferGroupOwnership(args.authzToken, args.groupId, args.newOwnerId)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("transferGroupOwnership", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_addGroupAdmins(self, seqid, iprot, oprot):
args = addGroupAdmins_args()
args.read(iprot)
iprot.readMessageEnd()
result = addGroupAdmins_result()
try:
result.success = self._handler.addGroupAdmins(args.authzToken, args.groupId, args.adminIds)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("addGroupAdmins", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_removeGroupAdmins(self, seqid, iprot, oprot):
args = removeGroupAdmins_args()
args.read(iprot)
iprot.readMessageEnd()
result = removeGroupAdmins_result()
try:
result.success = self._handler.removeGroupAdmins(args.authzToken, args.groupId, args.adminIds)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("removeGroupAdmins", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_hasAdminAccess(self, seqid, iprot, oprot):
args = hasAdminAccess_args()
args.read(iprot)
iprot.readMessageEnd()
result = hasAdminAccess_result()
try:
result.success = self._handler.hasAdminAccess(args.authzToken, args.groupId, args.adminId)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("hasAdminAccess", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_hasOwnerAccess(self, seqid, iprot, oprot):
args = hasOwnerAccess_args()
args.read(iprot)
iprot.readMessageEnd()
result = hasOwnerAccess_result()
try:
result.success = self._handler.hasOwnerAccess(args.authzToken, args.groupId, args.ownerId)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException as gse:
msg_type = TMessageType.REPLY
result.gse = gse
except airavata.api.error.ttypes.AuthorizationException as ae:
msg_type = TMessageType.REPLY
result.ae = ae
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("hasOwnerAccess", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
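# NOTE (editorial): each process_<method> above follows the same template: decode the
# <method>_args struct, call the user-supplied handler, and map the outcome onto the
# <method>_result struct -- GroupManagerServiceException is stored in result.gse,
# AuthorizationException in result.ae, and any unexpected exception is logged and replaced
# by a generic TApplicationException(INTERNAL_ERROR) so internal details never reach the
# caller. TTransportException, KeyboardInterrupt and SystemExit are re-raised so the
# surrounding server loop can tear the connection down.
#
# Minimal handler sketch (illustrative only; the class name and return values are
# assumptions, not part of the generated code):
#
#   class GroupManagerHandler(Iface):
#       def getAPIVersion(self):
#           return '0.20'                      # API version string
#       def createGroup(self, authzToken, groupModel):
#           # persist groupModel and hand back the new group's id (a string)
#           return 'group-123'
#
#   processor = Processor(GroupManagerHandler())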
# HELPER FUNCTIONS AND STRUCTURES
class getAPIVersion_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getAPIVersion_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getAPIVersion_result(object):
"""
Attributes:
- success
- gse
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, gse=None,):
self.success = success
self.gse = gse
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getAPIVersion_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
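# NOTE (editorial): every RPC in this service gets a pair of wrapper structs like the two
# above: <method>_args carries the request parameters and <method>_result carries either
# the return value (field id 0, 'success') or one of the declared exceptions. The
# sys.version_info[0] == 2 branches keep the module importable on both Python 2 (explicit
# UTF-8 encode/decode at the protocol boundary) and Python 3 (native str).
#
# Illustrative round trip through an in-memory transport (a sketch, not executed here;
# TMemoryBuffer and TBinaryProtocol come from the thrift runtime package):
#
#   from thrift.transport import TTransport
#   from thrift.protocol import TBinaryProtocol
#
#   buf = TTransport.TMemoryBuffer()
#   getAPIVersion_result(success='0.20').write(TBinaryProtocol.TBinaryProtocol(buf))
#
#   reply = getAPIVersion_result()
#   reply.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
#   assert reply.success == '0.20'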
class createGroup_args(object):
"""
Attributes:
- authzToken
- groupModel
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'groupModel', (airavata.model.group.ttypes.GroupModel, airavata.model.group.ttypes.GroupModel.thrift_spec), None, ), # 2
)
def __init__(self, authzToken=None, groupModel=None,):
self.authzToken = authzToken
self.groupModel = groupModel
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.groupModel = airavata.model.group.ttypes.GroupModel()
self.groupModel.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('createGroup_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
if self.groupModel is not None:
oprot.writeFieldBegin('groupModel', TType.STRUCT, 2)
self.groupModel.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
if self.groupModel is None:
raise TProtocolException(message='Required field groupModel is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class createGroup_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('createGroup_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class updateGroup_args(object):
"""
Attributes:
- authzToken
- groupModel
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'groupModel', (airavata.model.group.ttypes.GroupModel, airavata.model.group.ttypes.GroupModel.thrift_spec), None, ), # 2
)
def __init__(self, authzToken=None, groupModel=None,):
self.authzToken = authzToken
self.groupModel = groupModel
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.groupModel = airavata.model.group.ttypes.GroupModel()
self.groupModel.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('updateGroup_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
if self.groupModel is not None:
oprot.writeFieldBegin('groupModel', TType.STRUCT, 2)
self.groupModel.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
if self.groupModel is None:
raise TProtocolException(message='Required field groupModel is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class updateGroup_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('updateGroup_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteGroup_args(object):
"""
Attributes:
- authzToken
- groupId
- ownerId
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
(2, TType.STRING, 'groupId', 'UTF8', None, ), # 2
(3, TType.STRING, 'ownerId', 'UTF8', None, ), # 3
)
def __init__(self, authzToken=None, groupId=None, ownerId=None,):
self.authzToken = authzToken
self.groupId = groupId
self.ownerId = ownerId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.groupId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.ownerId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteGroup_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
if self.groupId is not None:
oprot.writeFieldBegin('groupId', TType.STRING, 2)
oprot.writeString(self.groupId.encode('utf-8') if sys.version_info[0] == 2 else self.groupId)
oprot.writeFieldEnd()
if self.ownerId is not None:
oprot.writeFieldBegin('ownerId', TType.STRING, 3)
oprot.writeString(self.ownerId.encode('utf-8') if sys.version_info[0] == 2 else self.ownerId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
if self.groupId is None:
raise TProtocolException(message='Required field groupId is unset!')
if self.ownerId is None:
raise TProtocolException(message='Required field ownerId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteGroup_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteGroup_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getGroup_args(object):
"""
Attributes:
- authzToken
- groupId
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
(2, TType.STRING, 'groupId', 'UTF8', None, ), # 2
)
def __init__(self, authzToken=None, groupId=None,):
self.authzToken = authzToken
self.groupId = groupId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.groupId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getGroup_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
if self.groupId is not None:
oprot.writeFieldBegin('groupId', TType.STRING, 2)
oprot.writeString(self.groupId.encode('utf-8') if sys.version_info[0] == 2 else self.groupId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
if self.groupId is None:
raise TProtocolException(message='Required field groupId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getGroup_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (airavata.model.group.ttypes.GroupModel, airavata.model.group.ttypes.GroupModel.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = airavata.model.group.ttypes.GroupModel()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getGroup_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getGroups_args(object):
"""
Attributes:
- authzToken
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
)
def __init__(self, authzToken=None,):
self.authzToken = authzToken
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getGroups_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getGroups_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, (airavata.model.group.ttypes.GroupModel, airavata.model.group.ttypes.GroupModel.thrift_spec), False), None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in range(_size0):
_elem5 = airavata.model.group.ttypes.GroupModel()
_elem5.read(iprot)
self.success.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getGroups_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter6 in self.success:
iter6.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
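# NOTE (editorial): for list-valued results such as getGroups_result.success the generated
# read() loop drains the list element by element (the _etypeN/_sizeN/_elemN/iterN names are
# throwaway locals emitted by the Thrift compiler) and write() mirrors it with
# writeListBegin()/writeListEnd(). With the default binary protocol the layout of that
# field is, roughly:
#
#   field header: type=TType.LIST, id=0
#   list header:  element type=TType.STRUCT, size=len(success)
#   each GroupModel struct serialized back to back, then the enclosing field stop
#
# so the element count sent up front is what tells the peer where the list ends without
# any terminator element.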
class getAllGroupsUserBelongs_args(object):
"""
Attributes:
- authzToken
- userName
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
(2, TType.STRING, 'userName', 'UTF8', None, ), # 2
)
def __init__(self, authzToken=None, userName=None,):
self.authzToken = authzToken
self.userName = userName
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.userName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getAllGroupsUserBelongs_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
if self.userName is not None:
oprot.writeFieldBegin('userName', TType.STRING, 2)
oprot.writeString(self.userName.encode('utf-8') if sys.version_info[0] == 2 else self.userName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
if self.userName is None:
raise TProtocolException(message='Required field userName is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getAllGroupsUserBelongs_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, (airavata.model.group.ttypes.GroupModel, airavata.model.group.ttypes.GroupModel.thrift_spec), False), None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in range(_size7):
_elem12 = airavata.model.group.ttypes.GroupModel()
_elem12.read(iprot)
self.success.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getAllGroupsUserBelongs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter13 in self.success:
iter13.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addUsersToGroup_args(object):
"""
Attributes:
- authzToken
- userIds
- groupId
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
(2, TType.LIST, 'userIds', (TType.STRING, 'UTF8', False), None, ), # 2
(3, TType.STRING, 'groupId', 'UTF8', None, ), # 3
)
def __init__(self, authzToken=None, userIds=None, groupId=None,):
self.authzToken = authzToken
self.userIds = userIds
self.groupId = groupId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.userIds = []
(_etype17, _size14) = iprot.readListBegin()
for _i18 in range(_size14):
_elem19 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.userIds.append(_elem19)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.groupId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addUsersToGroup_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
if self.userIds is not None:
oprot.writeFieldBegin('userIds', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.userIds))
for iter20 in self.userIds:
oprot.writeString(iter20.encode('utf-8') if sys.version_info[0] == 2 else iter20)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.groupId is not None:
oprot.writeFieldBegin('groupId', TType.STRING, 3)
oprot.writeString(self.groupId.encode('utf-8') if sys.version_info[0] == 2 else self.groupId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
if self.userIds is None:
raise TProtocolException(message='Required field userIds is unset!')
if self.groupId is None:
raise TProtocolException(message='Required field groupId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
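# NOTE (editorial): callers normally do not build these *_args structs by hand; the
# generated Client class earlier in this module packs them whenever a method such as
# addUsersToGroup(authzToken, userIds, groupId) is invoked. A hand-rolled request would
# look roughly like this (illustrative; the AuthzToken constructor argument is an
# assumption about airavata.model.security.ttypes):
#
#   token = airavata.model.security.ttypes.AuthzToken(accessToken='...')
#   args = addUsersToGroup_args(authzToken=token,
#                               userIds=['user-a', 'user-b'],
#                               groupId='group-123')
#   args.validate()   # raises TProtocolException if a required field is unset
#   args.write(oprot)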
class addUsersToGroup_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addUsersToGroup_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class removeUsersFromGroup_args(object):
"""
Attributes:
- authzToken
- userIds
- groupId
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
(2, TType.LIST, 'userIds', (TType.STRING, 'UTF8', False), None, ), # 2
(3, TType.STRING, 'groupId', 'UTF8', None, ), # 3
)
def __init__(self, authzToken=None, userIds=None, groupId=None,):
self.authzToken = authzToken
self.userIds = userIds
self.groupId = groupId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.userIds = []
(_etype24, _size21) = iprot.readListBegin()
for _i25 in range(_size21):
_elem26 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.userIds.append(_elem26)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.groupId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('removeUsersFromGroup_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
if self.userIds is not None:
oprot.writeFieldBegin('userIds', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.userIds))
for iter27 in self.userIds:
oprot.writeString(iter27.encode('utf-8') if sys.version_info[0] == 2 else iter27)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.groupId is not None:
oprot.writeFieldBegin('groupId', TType.STRING, 3)
oprot.writeString(self.groupId.encode('utf-8') if sys.version_info[0] == 2 else self.groupId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
if self.userIds is None:
raise TProtocolException(message='Required field userIds is unset!')
if self.groupId is None:
raise TProtocolException(message='Required field groupId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class removeUsersFromGroup_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('removeUsersFromGroup_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class transferGroupOwnership_args(object):
"""
Attributes:
- authzToken
- groupId
- newOwnerId
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
(2, TType.STRING, 'groupId', 'UTF8', None, ), # 2
(3, TType.STRING, 'newOwnerId', 'UTF8', None, ), # 3
)
def __init__(self, authzToken=None, groupId=None, newOwnerId=None,):
self.authzToken = authzToken
self.groupId = groupId
self.newOwnerId = newOwnerId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.groupId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.newOwnerId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('transferGroupOwnership_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
if self.groupId is not None:
oprot.writeFieldBegin('groupId', TType.STRING, 2)
oprot.writeString(self.groupId.encode('utf-8') if sys.version_info[0] == 2 else self.groupId)
oprot.writeFieldEnd()
if self.newOwnerId is not None:
oprot.writeFieldBegin('newOwnerId', TType.STRING, 3)
oprot.writeString(self.newOwnerId.encode('utf-8') if sys.version_info[0] == 2 else self.newOwnerId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
if self.groupId is None:
raise TProtocolException(message='Required field groupId is unset!')
if self.newOwnerId is None:
raise TProtocolException(message='Required field newOwnerId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class transferGroupOwnership_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('transferGroupOwnership_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addGroupAdmins_args(object):
"""
Attributes:
- authzToken
- groupId
- adminIds
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
(2, TType.STRING, 'groupId', 'UTF8', None, ), # 2
(3, TType.LIST, 'adminIds', (TType.STRING, 'UTF8', False), None, ), # 3
)
def __init__(self, authzToken=None, groupId=None, adminIds=None,):
self.authzToken = authzToken
self.groupId = groupId
self.adminIds = adminIds
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.groupId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.adminIds = []
(_etype31, _size28) = iprot.readListBegin()
for _i32 in range(_size28):
_elem33 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.adminIds.append(_elem33)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addGroupAdmins_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
if self.groupId is not None:
oprot.writeFieldBegin('groupId', TType.STRING, 2)
oprot.writeString(self.groupId.encode('utf-8') if sys.version_info[0] == 2 else self.groupId)
oprot.writeFieldEnd()
if self.adminIds is not None:
oprot.writeFieldBegin('adminIds', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.adminIds))
for iter34 in self.adminIds:
oprot.writeString(iter34.encode('utf-8') if sys.version_info[0] == 2 else iter34)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
if self.groupId is None:
raise TProtocolException(message='Required field groupId is unset!')
if self.adminIds is None:
raise TProtocolException(message='Required field adminIds is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addGroupAdmins_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addGroupAdmins_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class removeGroupAdmins_args(object):
"""
Attributes:
- authzToken
- groupId
- adminIds
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
(2, TType.STRING, 'groupId', 'UTF8', None, ), # 2
(3, TType.LIST, 'adminIds', (TType.STRING, 'UTF8', False), None, ), # 3
)
def __init__(self, authzToken=None, groupId=None, adminIds=None,):
self.authzToken = authzToken
self.groupId = groupId
self.adminIds = adminIds
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.groupId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.adminIds = []
(_etype38, _size35) = iprot.readListBegin()
for _i39 in range(_size35):
_elem40 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.adminIds.append(_elem40)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('removeGroupAdmins_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
if self.groupId is not None:
oprot.writeFieldBegin('groupId', TType.STRING, 2)
oprot.writeString(self.groupId.encode('utf-8') if sys.version_info[0] == 2 else self.groupId)
oprot.writeFieldEnd()
if self.adminIds is not None:
oprot.writeFieldBegin('adminIds', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.adminIds))
for iter41 in self.adminIds:
oprot.writeString(iter41.encode('utf-8') if sys.version_info[0] == 2 else iter41)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
if self.groupId is None:
raise TProtocolException(message='Required field groupId is unset!')
if self.adminIds is None:
raise TProtocolException(message='Required field adminIds is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class removeGroupAdmins_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('removeGroupAdmins_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class hasAdminAccess_args(object):
"""
Attributes:
- authzToken
- groupId
- adminId
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
(2, TType.STRING, 'groupId', 'UTF8', None, ), # 2
(3, TType.STRING, 'adminId', 'UTF8', None, ), # 3
)
def __init__(self, authzToken=None, groupId=None, adminId=None,):
self.authzToken = authzToken
self.groupId = groupId
self.adminId = adminId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.groupId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.adminId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('hasAdminAccess_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
if self.groupId is not None:
oprot.writeFieldBegin('groupId', TType.STRING, 2)
oprot.writeString(self.groupId.encode('utf-8') if sys.version_info[0] == 2 else self.groupId)
oprot.writeFieldEnd()
if self.adminId is not None:
oprot.writeFieldBegin('adminId', TType.STRING, 3)
oprot.writeString(self.adminId.encode('utf-8') if sys.version_info[0] == 2 else self.adminId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
if self.groupId is None:
raise TProtocolException(message='Required field groupId is unset!')
if self.adminId is None:
raise TProtocolException(message='Required field adminId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class hasAdminAccess_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('hasAdminAccess_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class hasOwnerAccess_args(object):
"""
Attributes:
- authzToken
- groupId
- ownerId
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authzToken', (airavata.model.security.ttypes.AuthzToken, airavata.model.security.ttypes.AuthzToken.thrift_spec), None, ), # 1
(2, TType.STRING, 'groupId', 'UTF8', None, ), # 2
(3, TType.STRING, 'ownerId', 'UTF8', None, ), # 3
)
def __init__(self, authzToken=None, groupId=None, ownerId=None,):
self.authzToken = authzToken
self.groupId = groupId
self.ownerId = ownerId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authzToken = airavata.model.security.ttypes.AuthzToken()
self.authzToken.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.groupId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.ownerId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('hasOwnerAccess_args')
if self.authzToken is not None:
oprot.writeFieldBegin('authzToken', TType.STRUCT, 1)
self.authzToken.write(oprot)
oprot.writeFieldEnd()
if self.groupId is not None:
oprot.writeFieldBegin('groupId', TType.STRING, 2)
oprot.writeString(self.groupId.encode('utf-8') if sys.version_info[0] == 2 else self.groupId)
oprot.writeFieldEnd()
if self.ownerId is not None:
oprot.writeFieldBegin('ownerId', TType.STRING, 3)
oprot.writeString(self.ownerId.encode('utf-8') if sys.version_info[0] == 2 else self.ownerId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.authzToken is None:
raise TProtocolException(message='Required field authzToken is unset!')
if self.groupId is None:
raise TProtocolException(message='Required field groupId is unset!')
if self.ownerId is None:
raise TProtocolException(message='Required field ownerId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class hasOwnerAccess_result(object):
"""
Attributes:
- success
- gse
- ae
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'gse', (airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException, airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ae', (airavata.api.error.ttypes.AuthorizationException, airavata.api.error.ttypes.AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, gse=None, ae=None,):
self.success = success
self.gse = gse
self.ae = ae
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.gse = airavata.service.profile.groupmanager.cpi.error.ttypes.GroupManagerServiceException()
self.gse.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ae = airavata.api.error.ttypes.AuthorizationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('hasOwnerAccess_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.gse is not None:
oprot.writeFieldBegin('gse', TType.STRUCT, 1)
self.gse.write(oprot)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 2)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
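# Hedged illustration, not part of the generated service code: assuming the
# Apache Thrift Python runtime (the `thrift` package) is installed, an args
# struct can be round-tripped through the plain binary protocol like this.
def _example_roundtrip_hasOwnerAccess_args(args_struct):
    """Serialize a hasOwnerAccess_args instance and read it back (illustrative only)."""
    from thrift.transport import TTransport
    from thrift.protocol import TBinaryProtocol
    # write() walks the struct fields into an in-memory transport
    out_buf = TTransport.TMemoryBuffer()
    args_struct.write(TBinaryProtocol.TBinaryProtocol(out_buf))
    # read() rebuilds an equivalent struct from the serialized bytes
    decoded = hasOwnerAccess_args()
    in_buf = TTransport.TMemoryBuffer(out_buf.getvalue())
    decoded.read(TBinaryProtocol.TBinaryProtocol(in_buf))
    return decoded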
| python | 130,623 |
# Copyright 2013 by Leighton Pritchard. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for general functionality of the ColorSpiral utility."""
# Builtins
import colorsys
from math import pi
import os
import unittest
from cmath import rect
# Do we have ReportLab? Raise error if not present.
from Bio import MissingPythonDependencyError
try:
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.pagesizes import A4
except ImportError:
raise MissingPythonDependencyError(
"Install reportlab if you want to use Bio.Graphics."
) from None
# Biopython Bio.Graphics.ColorSpiral
from Bio.Graphics.ColorSpiral import ColorSpiral, get_colors, get_color_dict
class SpiralTest(unittest.TestCase):
"""Construct and draw ColorSpiral colours placed on HSV spiral."""
def setUp(self):
"""Set up canvas for drawing."""
output_filename = os.path.join("Graphics", "spiral_test.pdf")
self.c = Canvas(output_filename, pagesize=A4)
# co-ordinates of the centre of the canvas
self.x_0, self.y_0 = 0.5 * A4[0], 0.5 * A4[1]
def test_colorlist(self):
"""Get set of eight colours, no jitter, using ColorSpiral."""
cs = ColorSpiral(a=4, b=0.33, jitter=0)
colours = list(cs.get_colors(8))
cstr = ["(%.2f, %.2f, %.2f)" % (r, g, b) for r, g, b in colours]
expected = [
"(0.64, 0.74, 0.81)",
"(0.68, 0.52, 0.76)",
"(0.72, 0.41, 0.55)",
"(0.68, 0.39, 0.31)",
"(0.63, 0.54, 0.22)",
"(0.48, 0.59, 0.13)",
"(0.24, 0.54, 0.06)",
"(0.01, 0.50, -0.00)",
]
self.assertEqual(cstr, expected)
def test_colorspiral(self):
"""Get set of 16 colours, no jitter, using ColorSpiral."""
cs = ColorSpiral(a=4, b=0.33, jitter=0)
radius = A4[0] * 0.025
for r, g, b in cs.get_colors(16):
self.c.setFillColor((r, g, b))
# Convert HSV colour to rectangular coordinates on HSV disc
h, s, v = colorsys.rgb_to_hsv(r, g, b)
coords = rect(s * A4[0] * 0.45, h * 2 * pi)
x, y = self.x_0 + coords.real, self.y_0 + coords.imag
self.c.ellipse(
x - radius, y - radius, x + radius, y + radius, stroke=0, fill=1
)
self.finish()
def finish(self):
"""Clean up and save image."""
self.c.save()
class SquareTest(unittest.TestCase):
"""Construct and draw ColorSpiral colours placed in a square, with jitter."""
def setUp(self):
"""Set up canvas for drawing."""
output_filename = os.path.join("Graphics", "square_test.pdf")
self.c = Canvas(output_filename, pagesize=(500, 500))
def test_colorspiral(self):
"""Set of 625 colours, with jitter, using get_colors()."""
boxedge = 20
boxes_per_row = 25
rows = 0
for i, c in enumerate(get_colors(625)):
self.c.setFillColor(c)
x1 = boxedge * (i % boxes_per_row)
y1 = rows * boxedge
self.c.rect(x1, y1, boxedge, boxedge, fill=1, stroke=0)
if not (i + 1) % boxes_per_row:
rows += 1
self.finish()
def finish(self):
"""Clean up and save image."""
self.c.save()
class DictTest(unittest.TestCase):
"""Generate set of colours on the basis of an iterable."""
def test_dict(self):
"""get_color_dict() for classes A-D, no jitter."""
classes = ["A", "B", "C", "D"]
colors = get_color_dict(classes, jitter=0)
cstr = [
"%s: (%.2f, %.2f, %.2f)" % (c, r, g, b)
for c, (r, g, b) in sorted(colors.items())
]
expected = [
"A: (0.52, 0.76, 0.69)",
"B: (0.40, 0.31, 0.68)",
"C: (0.59, 0.13, 0.47)",
"D: (0.50, 0.00, 0.00)",
]
self.assertEqual(cstr, expected)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| python | 4,222 |
from django.urls import path, include
from . import views
app_name = 'project'
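# With app_name set, these routes live in the 'project' namespace, so they are
# typically reversed as, for example (illustrative, the prefix depends on where
# this URLconf is include()d):
#   reverse('project:list_projects')
#   reverse('project:list_project_mentors', kwargs={'id': 42})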
urlpatterns = [
    path('list', views.list_projects, name='list_projects'),
    path('<int:id>/mentors', views.list_project_mentors, name='list_project_mentors'),
]
 | python | 249
import requests
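# `register(api)` builds a decorator: the decorated method only returns the
# query parameters, while the wrapper issues GET <self.host + api> with those
# params and returns the parsed JSON body.
# Hedged usage sketch (the class, host and endpoint below are made-up):
#
#     class Client:
#         host = 'https://api.example.com'
#
#         @register('/v1/search')
#         def search(self, keyword):
#             return {'q': keyword}
#
#     Client().search('python')
#     # -> JSON from GET https://api.example.com/v1/search?q=python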
def register(api):
def fixer(func):
        def wrapper(self, *args, **kwargs):
            params = func(self, *args, **kwargs)
            url = self.host + api
            response = requests.get(url, params=params)
return response.json()
return wrapper
    return fixer
 | python | 310
from sys import stdin
n = int(stdin.readline())
# get n numbers
data = list(map(int, stdin.readline().split()))
# get max from data
m = max(data)
data = [(x/m) * 100 for x in data]
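# e.g. with n = 3 and scores 40 80 60: the max is 80, the rescaled scores are
# 50.0, 100.0 and 75.0, and the printed average is 75.0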
print(sum(data) / n)
 | python | 209
"""\
Examples
To check all pages on the production server:
%(prog)s production.ini
For the development.ini you must supply the paster app name:
%(prog)s development.ini --app-name app
"""
import json
import logging
from future.utils import itervalues
from pyramid.traversal import resource_path
EPILOG = __doc__
logger = logging.getLogger(__name__)
def check_path(testapp, path):
try:
res = testapp.get(path, status='*').maybe_follow(status='*')
except Exception:
logger.exception('Render failed: %s', path)
return False
    if res.status_int != 200:
        logger.error('Render failed (%s): %s', res.status, path)
        script = res.html.find('script', **{'data-prop-name': 'context'})
        if script is not None:
            context = json.loads(script.text)
            if 'detail' in context:
                logger.debug(context['detail'])
            else:
                logger.debug(json.dumps(context, indent=4))
        return False
    return True
def run(testapp, collections=None):
app = testapp.app
root = app.root_factory(app)
if not collections:
collections = root.by_item_type.keys()
check_path(testapp, '/')
for collection_name in collections:
collection = root[collection_name]
collection_path = resource_path(collection, '')
check_path(testapp, collection_path)
failed = 0
for count, item in enumerate(itervalues(collection)):
path = resource_path(item, '')
if not check_path(testapp, path):
failed += 1
if failed:
logger.info('Collection %s: %d of %d failed to render.',
collection_path, failed, count)
else:
logger.info('Collection %s: all %d rendered ok',
collection_path, count)
def internal_app(configfile, app_name=None, username='TEST', accept='text/html'):
from pyramid import paster
from webtest import TestApp
app = paster.get_app(configfile, app_name)
environ = {
'HTTP_ACCEPT': accept,
'REMOTE_USER': username,
}
return TestApp(app, environ)
def main():
import argparse
parser = argparse.ArgumentParser(
description="Check rendering of items", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--item-type', action='append', help="Item type")
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument('--username', '-u', default='TEST',
help="User uuid/email")
parser.add_argument('config_uri', help="path to configfile")
parser.add_argument('path', nargs='*', help="path to test")
args = parser.parse_args()
logging.basicConfig()
testapp = internal_app(args.config_uri, args.app_name, args.username)
# Loading app will have configured from config file. Reconfigure here:
logging.getLogger('clincoded').setLevel(logging.DEBUG)
if args.path:
failed = 0
for path in args.path:
if not check_path(testapp, path):
failed += 1
if failed:
logger.info('Paths: %d of %d failed to render.',
failed, len(args.path))
else:
logger.info('Paths: all %d rendered ok', len(args.path))
else:
run(testapp, args.item_type)
if __name__ == '__main__':
main()
| python | 3,431 |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import time
from unittest import mock
from azure.core.credentials import AccessToken
from azure.core.exceptions import ClientAuthenticationError
from azure.identity.aio import ManagedIdentityCredential
from azure.identity._constants import Endpoints, EnvironmentVariables
from azure.identity._internal.user_agent import USER_AGENT
import pytest
from helpers import build_aad_response, mock_response, Request
from helpers_async import async_validating_transport
MANAGED_IDENTITY_ENVIRON = "azure.identity.aio._credentials.managed_identity.os.environ"
@pytest.mark.asyncio
async def test_cloud_shell():
"""Cloud Shell environment: only MSI_ENDPOINT set"""
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
endpoint = "http://localhost:42/token"
scope = "scope"
transport = async_validating_transport(
requests=[
Request(
base_url=endpoint,
method="POST",
required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
required_data={"resource": scope},
)
],
responses=[
mock_response(
json_payload={
"access_token": access_token,
"expires_in": 0,
"expires_on": expires_on,
"not_before": int(time.time()),
"resource": scope,
"token_type": "Bearer",
}
)
],
)
with mock.patch("os.environ", {EnvironmentVariables.MSI_ENDPOINT: endpoint}):
token = await ManagedIdentityCredential(transport=transport).get_token(scope)
assert token == expected_token
@pytest.mark.asyncio
async def test_cloud_shell_user_assigned_identity():
"""Cloud Shell environment: only MSI_ENDPOINT set"""
access_token = "****"
expires_on = 42
client_id = "some-guid"
expected_token = AccessToken(access_token, expires_on)
endpoint = "http://localhost:42/token"
scope = "scope"
transport = async_validating_transport(
requests=[
Request(
base_url=endpoint,
method="POST",
required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
required_data={"client_id": client_id, "resource": scope},
)
],
responses=[
mock_response(
json_payload={
"access_token": access_token,
"expires_in": 0,
"expires_on": expires_on,
"not_before": int(time.time()),
"resource": scope,
"token_type": "Bearer",
}
)
],
)
with mock.patch("os.environ", {EnvironmentVariables.MSI_ENDPOINT: endpoint}):
token = await ManagedIdentityCredential(client_id=client_id, transport=transport).get_token(scope)
assert token == expected_token
@pytest.mark.asyncio
async def test_prefers_app_service_2017_09_01():
"""When the environment is configured for both App Service versions, the credential should prefer 2017-09-01
Support for 2019-08-01 was removed due to https://github.com/Azure/azure-sdk-for-python/issues/14670. This test
should be removed when that support is added back.
"""
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
url = "http://localhost:42/token"
secret = "expected-secret"
scope = "scope"
transport = async_validating_transport(
requests=[
Request(
url,
method="GET",
required_headers={"secret": secret, "User-Agent": USER_AGENT},
required_params={"api-version": "2017-09-01", "resource": scope},
)
]
* 2,
responses=[
mock_response(
json_payload={
"access_token": access_token,
"expires_on": "01/01/1970 00:00:{} +00:00".format(expires_on), # linux format
"resource": scope,
"token_type": "Bearer",
}
),
mock_response(
json_payload={
"access_token": access_token,
"expires_on": "1/1/1970 12:00:{} AM +00:00".format(expires_on), # windows format
"resource": scope,
"token_type": "Bearer",
}
),
],
)
with mock.patch.dict(
MANAGED_IDENTITY_ENVIRON,
{
EnvironmentVariables.IDENTITY_ENDPOINT: url,
EnvironmentVariables.IDENTITY_HEADER: secret,
EnvironmentVariables.MSI_ENDPOINT: url,
EnvironmentVariables.MSI_SECRET: secret,
},
clear=True,
):
credential = ManagedIdentityCredential(transport=transport)
token = await credential.get_token(scope)
assert token == expected_token
assert token.expires_on == expires_on
credential = ManagedIdentityCredential(transport=transport)
token = await credential.get_token(scope)
assert token == expected_token
assert token.expires_on == expires_on
@pytest.mark.skip("2019-08-01 support was removed due to https://github.com/Azure/azure-sdk-for-python/issues/14670. This test should be enabled when that support is added back.")
@pytest.mark.asyncio
async def test_app_service_2019_08_01():
"""App Service 2019-08-01: IDENTITY_ENDPOINT, IDENTITY_HEADER set"""
access_token = "****"
expires_on = 42
endpoint = "http://localhost:42/token"
secret = "expected-secret"
scope = "scope"
async def send(request, **_):
assert request.url.startswith(endpoint)
assert request.method == "GET"
assert request.headers["X-IDENTITY-HEADER"] == secret
assert request.headers["User-Agent"] == USER_AGENT
assert request.query["api-version"] == "2019-08-01"
assert request.query["resource"] == scope
return mock_response(
json_payload={
"access_token": access_token,
"expires_on": str(expires_on),
"resource": scope,
"token_type": "Bearer",
}
)
# when configuration for both API versions is present, the credential should prefer the most recent
for environment in [
{EnvironmentVariables.IDENTITY_ENDPOINT: endpoint, EnvironmentVariables.IDENTITY_HEADER: secret},
{
EnvironmentVariables.IDENTITY_ENDPOINT: endpoint,
EnvironmentVariables.IDENTITY_HEADER: secret,
EnvironmentVariables.MSI_ENDPOINT: endpoint,
EnvironmentVariables.MSI_SECRET: secret,
},
]:
with mock.patch.dict("os.environ", environment, clear=True):
token = await ManagedIdentityCredential(transport=mock.Mock(send=send)).get_token(scope)
assert token.token == access_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_app_service_2017_09_01():
"""test parsing of App Service MSI 2017-09-01's eccentric platform-dependent expires_on strings"""
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
url = "http://localhost:42/token"
secret = "expected-secret"
scope = "scope"
transport = async_validating_transport(
requests=[
Request(
url,
method="GET",
required_headers={"secret": secret, "User-Agent": USER_AGENT},
required_params={"api-version": "2017-09-01", "resource": scope},
)
]
* 2,
responses=[
mock_response(
json_payload={
"access_token": access_token,
"expires_on": "01/01/1970 00:00:{} +00:00".format(expires_on), # linux format
"resource": scope,
"token_type": "Bearer",
}
),
mock_response(
json_payload={
"access_token": access_token,
"expires_on": "1/1/1970 12:00:{} AM +00:00".format(expires_on), # windows format
"resource": scope,
"token_type": "Bearer",
}
),
],
)
with mock.patch.dict(
MANAGED_IDENTITY_ENVIRON,
{EnvironmentVariables.MSI_ENDPOINT: url, EnvironmentVariables.MSI_SECRET: secret},
clear=True,
):
token = await ManagedIdentityCredential(transport=transport).get_token(scope)
assert token == expected_token
assert token.expires_on == expires_on
token = await ManagedIdentityCredential(transport=transport).get_token(scope)
assert token == expected_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_app_service_user_assigned_identity():
"""App Service 2017-09-01: MSI_ENDPOINT, MSI_SECRET set"""
access_token = "****"
expires_on = 42
client_id = "some-guid"
expected_token = AccessToken(access_token, expires_on)
endpoint = "http://localhost:42/token"
secret = "expected-secret"
scope = "scope"
transport = async_validating_transport(
requests=[
Request(
base_url=endpoint,
method="GET",
required_headers={"secret": secret, "User-Agent": USER_AGENT},
required_params={"api-version": "2017-09-01", "clientid": client_id, "resource": scope},
)
],
responses=[
mock_response(
json_payload={
"access_token": access_token,
"expires_on": "01/01/1970 00:00:{} +00:00".format(expires_on),
"resource": scope,
"token_type": "Bearer",
}
)
],
)
with mock.patch(
"os.environ", {EnvironmentVariables.MSI_ENDPOINT: endpoint, EnvironmentVariables.MSI_SECRET: secret}
):
token = await ManagedIdentityCredential(client_id=client_id, transport=transport).get_token(scope)
assert token == expected_token
@pytest.mark.asyncio
async def test_client_id_none():
"""the credential should ignore client_id=None"""
expected_access_token = "****"
scope = "scope"
async def send(request, **_):
assert "client_id" not in request.query # IMDS
assert "clientid" not in request.query # App Service 2017-09-01
if request.data:
assert "client_id" not in request.body # Cloud Shell
return mock_response(
json_payload=(
build_aad_response(
access_token=expected_access_token, expires_on="01/01/1970 00:00:42 +00:00", resource=scope
)
)
)
with mock.patch.dict(MANAGED_IDENTITY_ENVIRON, {}, clear=True):
credential = ManagedIdentityCredential(client_id=None, transport=mock.Mock(send=send))
token = await credential.get_token(scope)
assert token.token == expected_access_token
with mock.patch.dict(
MANAGED_IDENTITY_ENVIRON,
{EnvironmentVariables.MSI_ENDPOINT: "https://localhost", EnvironmentVariables.MSI_SECRET: "secret"},
clear=True,
):
credential = ManagedIdentityCredential(client_id=None, transport=mock.Mock(send=send))
token = await credential.get_token(scope)
assert token.token == expected_access_token
with mock.patch.dict(
MANAGED_IDENTITY_ENVIRON, {EnvironmentVariables.MSI_ENDPOINT: "https://localhost"}, clear=True,
):
credential = ManagedIdentityCredential(client_id=None, transport=mock.Mock(send=send))
token = await credential.get_token(scope)
assert token.token == expected_access_token
@pytest.mark.asyncio
async def test_imds():
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
scope = "scope"
transport = async_validating_transport(
requests=[
Request(url=Endpoints.IMDS), # first request should be availability probe => match only the URL
Request(
base_url=Endpoints.IMDS,
method="GET",
required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
required_params={"api-version": "2018-02-01", "resource": scope},
),
],
responses=[
# probe receives error response
mock_response(status_code=400, json_payload={"error": "this is an error message"}),
mock_response(
json_payload={
"access_token": access_token,
"expires_in": 42,
"expires_on": expires_on,
"ext_expires_in": 42,
"not_before": int(time.time()),
"resource": scope,
"token_type": "Bearer",
}
),
],
)
# ensure e.g. $MSI_ENDPOINT isn't set, so we get ImdsCredential
with mock.patch.dict("os.environ", clear=True):
token = await ManagedIdentityCredential(transport=transport).get_token(scope)
assert token == expected_token
@pytest.mark.asyncio
async def test_imds_user_assigned_identity():
access_token = "****"
expires_on = 42
expected_token = AccessToken(access_token, expires_on)
url = Endpoints.IMDS
scope = "scope"
client_id = "some-guid"
transport = async_validating_transport(
requests=[
Request(base_url=url), # first request should be availability probe => match only the URL
Request(
base_url=url,
method="GET",
required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
required_params={"api-version": "2018-02-01", "client_id": client_id, "resource": scope},
),
],
responses=[
# probe receives error response
mock_response(status_code=400, json_payload={"error": "this is an error message"}),
mock_response(
json_payload={
"access_token": access_token,
"client_id": client_id,
"expires_in": 42,
"expires_on": expires_on,
"ext_expires_in": 42,
"not_before": int(time.time()),
"resource": scope,
"token_type": "Bearer",
}
),
],
)
# ensure e.g. $MSI_ENDPOINT isn't set, so we get ImdsCredential
with mock.patch.dict("os.environ", clear=True):
token = await ManagedIdentityCredential(client_id=client_id, transport=transport).get_token(scope)
assert token == expected_token
@pytest.mark.asyncio
async def test_service_fabric():
"""Service Fabric 2019-07-01-preview"""
access_token = "****"
expires_on = 42
endpoint = "http://localhost:42/token"
secret = "expected-secret"
thumbprint = "SHA1HEX"
scope = "scope"
async def send(request, **_):
assert request.url.startswith(endpoint)
assert request.method == "GET"
assert request.headers["Secret"] == secret
assert request.query["api-version"] == "2019-07-01-preview"
assert request.query["resource"] == scope
return mock_response(
json_payload={
"access_token": access_token,
"expires_on": str(expires_on),
"resource": scope,
"token_type": "Bearer",
}
)
with mock.patch(
"os.environ",
{
EnvironmentVariables.IDENTITY_ENDPOINT: endpoint,
EnvironmentVariables.IDENTITY_HEADER: secret,
EnvironmentVariables.IDENTITY_SERVER_THUMBPRINT: thumbprint,
},
):
token = await ManagedIdentityCredential(transport=mock.Mock(send=send)).get_token(scope)
assert token.token == access_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_azure_arc(tmpdir):
"""Azure Arc 2019-11-01"""
access_token = "****"
api_version = "2019-11-01"
expires_on = 42
identity_endpoint = "http://localhost:42/token"
imds_endpoint = "http://localhost:42"
scope = "scope"
secret_key = "XXXX"
key_file = tmpdir.mkdir("key").join("key_file.key")
key_file.write(secret_key)
assert key_file.read() == secret_key
key_path = os.path.join(key_file.dirname, key_file.basename)
transport = async_validating_transport(
requests=[
Request(
base_url=identity_endpoint,
method="GET",
required_headers={"Metadata": "true"},
required_params={"api-version": api_version, "resource": scope},
),
Request(
base_url=identity_endpoint,
method="GET",
required_headers={"Metadata": "true", "Authorization": "Basic {}".format(secret_key)},
required_params={"api-version": api_version, "resource": scope},
),
],
responses=[
# first response gives path to authentication key
mock_response(status_code=401, headers={"WWW-Authenticate": "Basic realm={}".format(key_path)}),
mock_response(
json_payload={
"access_token": access_token,
"expires_on": expires_on,
"resource": scope,
"token_type": "Bearer",
}
),
],
)
with mock.patch(
"os.environ",
{
EnvironmentVariables.IDENTITY_ENDPOINT: identity_endpoint,
EnvironmentVariables.IMDS_ENDPOINT: imds_endpoint,
},
):
token = await ManagedIdentityCredential(transport=transport).get_token(scope)
assert token.token == access_token
assert token.expires_on == expires_on
@pytest.mark.asyncio
async def test_azure_arc_client_id():
"""Azure Arc doesn't support user-assigned managed identity"""
with mock.patch(
"os.environ",
{
EnvironmentVariables.IDENTITY_ENDPOINT: "http://localhost:42/token",
EnvironmentVariables.IMDS_ENDPOINT: "http://localhost:42",
}
):
credential = ManagedIdentityCredential(client_id="some-guid")
with pytest.raises(ClientAuthenticationError):
await credential.get_token("scope")
| python | 19,050 |
# This file is generated by /Users/travis/build/MacPython/numpy-wheels/numpy/setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
atlas_3_10_blas_info={}
atlas_3_10_blas_threads_info={}
atlas_threads_info={}
blas_opt_info={'extra_link_args': ['-Wl,-framework', '-Wl,Accelerate'], 'define_macros': [('NO_ATLAS_INFO', 3), ('HAVE_CBLAS', None)], 'extra_compile_args': ['-msse3', '-I/System/Library/Frameworks/vecLib.framework/Headers']}
atlas_blas_threads_info={}
openblas_info={}
lapack_opt_info={'extra_link_args': ['-Wl,-framework', '-Wl,Accelerate'], 'define_macros': [('NO_ATLAS_INFO', 3), ('HAVE_CBLAS', None)], 'extra_compile_args': ['-msse3']}
openblas_lapack_info={}
atlas_3_10_threads_info={}
atlas_info={}
atlas_3_10_info={}
lapack_mkl_info={}
blas_mkl_info={}
atlas_blas_info={}
mkl_info={}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
| python | 1,372 |
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import queue
import os
import random
import re
from collections import defaultdict
import threading
import socket
import json
import sys
import ipaddress
import asyncio
from typing import NamedTuple, Optional, Sequence, List, Dict, Tuple
import traceback
import dns
import dns.resolver
import aiorpcx
from aiorpcx import TaskGroup
from aiohttp import ClientResponse
from . import util
from .util import (log_exceptions, ignore_exceptions,
bfh, SilentTaskGroup, make_aiohttp_session, send_exception_to_crash_reporter,
is_hash256_str, is_non_negative_integer)
from .bitcoin import COIN
from . import constants
from . import blockchain
from . import bitcoin
from .constants import CHUNK_SIZE
from .blockchain import Blockchain, HEADER_SIZE
from .axe_net import AxeNet
from .interface import (Interface, serialize_server, deserialize_server,
RequestTimedOut, NetworkTimeout, BUCKET_NAME_OF_ONION_SERVERS)
from .version import ELECTRUM_VERSION, PROTOCOL_VERSION
from .simple_config import SimpleConfig
from .i18n import _
from .logging import get_logger, Logger
_logger = get_logger(__name__)
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
NUM_TARGET_CONNECTED_SERVERS = 10
NUM_RECENT_SERVERS = 20
def parse_servers(result: Sequence[Tuple[str, str, List[str]]]) -> Dict[str, dict]:
""" parse servers list into dict format"""
servers = {}
for item in result:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
if re.match(r"[st]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = constants.net.DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match("v(.?)+", v):
version = v[1:]
elif re.match(r"p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
if out:
out['pruning'] = pruning_level
out['version'] = version
servers[host] = out
return servers
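# Illustrative example (made-up hostname): a peer entry such as
#   ('192.0.2.1', 'electrum.example.com', ['v1.4', 's50002', 't50001', 'p10000'])
# is parsed into
#   {'electrum.example.com': {'s': '50002', 't': '50001',
#                             'pruning': '10000', 'version': '1.4'}}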
def filter_version(servers):
def is_recent(version):
try:
return util.versiontuple(version) >= util.versiontuple(PROTOCOL_VERSION)
except Exception as e:
return False
return {k: v for k, v in servers.items() if is_recent(v.get('version'))}
def filter_noonion(servers):
return {k: v for k, v in servers.items() if not k.endswith('.onion')}
def filter_protocol(hostmap, protocol='s'):
'''Filters the hostmap for those implementing protocol.
The result is a list in serialized form.'''
eligible = []
for host, portmap in hostmap.items():
port = portmap.get(protocol)
if port:
eligible.append(serialize_server(host, port, protocol))
return eligible
def pick_random_server(hostmap=None, protocol='s', exclude_set=None):
if hostmap is None:
hostmap = constants.net.DEFAULT_SERVERS
if exclude_set is None:
exclude_set = set()
eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
return random.choice(eligible) if eligible else None
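# Both helpers work on servers in serialized form, i.e. 'host:port:protocol'
# strings (as produced by serialize_server imported above), for example
# 'electrum.example.com:50002:s' (made-up hostname); pick_random_server()
# returns one such string, or None if nothing matches the protocol.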
class NetworkParameters(NamedTuple):
host: str
port: str
protocol: str
proxy: Optional[dict]
auto_connect: bool
oneserver: bool = False
proxy_modes = ['socks4', 'socks5']
def serialize_proxy(p):
if not isinstance(p, dict):
return None
return ':'.join([p.get('mode'), p.get('host'), p.get('port'),
p.get('user', ''), p.get('password', '')])
def deserialize_proxy(s: str) -> Optional[dict]:
if not isinstance(s, str):
return None
if s.lower() == 'none':
return None
proxy = { "mode":"socks5", "host":"localhost" }
# FIXME raw IPv6 address fails here
args = s.split(':')
n = 0
if proxy_modes.count(args[n]) == 1:
proxy["mode"] = args[n]
n += 1
if len(args) > n:
proxy["host"] = args[n]
n += 1
if len(args) > n:
proxy["port"] = args[n]
n += 1
else:
proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
if len(args) > n:
proxy["user"] = args[n]
n += 1
if len(args) > n:
proxy["password"] = args[n]
return proxy
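# Illustrative example: deserialize_proxy('socks5:127.0.0.1:9050:user:pw')
# -> {'mode': 'socks5', 'host': '127.0.0.1', 'port': '9050',
#     'user': 'user', 'password': 'pw'}
# and serialize_proxy() turns that dict back into the same string.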
class BestEffortRequestFailed(Exception): pass
class TxBroadcastError(Exception):
def get_message_for_gui(self):
raise NotImplementedError()
class TxBroadcastHashMismatch(TxBroadcastError):
def get_message_for_gui(self):
return "{}\n{}\n\n{}" \
.format(_("The server returned an unexpected transaction ID when broadcasting the transaction."),
_("Consider trying to connect to a different server, or updating Axe Electrum."),
str(self))
class TxBroadcastServerReturnedError(TxBroadcastError):
def get_message_for_gui(self):
return "{}\n{}\n\n{}" \
.format(_("The server returned an error when broadcasting the transaction."),
_("Consider trying to connect to a different server, or updating Axe Electrum."),
str(self))
class TxBroadcastUnknownError(TxBroadcastError):
def get_message_for_gui(self):
return "{}\n{}" \
.format(_("Unknown error when broadcasting the transaction."),
_("Consider trying to connect to a different server, or updating Axe Electrum."))
class UntrustedServerReturnedError(Exception):
def __init__(self, *, original_exception):
self.original_exception = original_exception
def __str__(self):
return _("The server returned an error.")
def __repr__(self):
return f"<UntrustedServerReturnedError original_exception: {repr(self.original_exception)}>"
INSTANCE = None
TOR_WARN_MSG = _('Warning: Tor proxy is not detected, to enable'
' it read the docs:')
TOR_DOCS_TITLE = _('Tor Setup Docs')
TOR_DOCS_URI = ('https://github.com/axerunners/electrum-axe/'
'blob/master/docs/tor.md')
TOR_DOCS_URI_QT = f'<br><a href="{TOR_DOCS_URI}">{TOR_DOCS_TITLE}</a>'
TOR_DOCS_URI_KIVY = (f'\n\n[color=#00f][ref={TOR_DOCS_URI}]'
f'{TOR_DOCS_TITLE}[/ref][/color]')
class Network(Logger):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
"""
LOGGING_SHORTCUT = 'n'
TOR_WARN_MSG_QT = f'{TOR_WARN_MSG} {TOR_DOCS_URI_QT}'
TOR_WARN_MSG_KIVY = f'{TOR_WARN_MSG} {TOR_DOCS_URI_KIVY}'
TOR_WARN_MSG_TXT = f'{TOR_WARN_MSG}\n{TOR_DOCS_URI}'
TOR_AUTO_ON_MSG = _('Detect Tor proxy on wallet startup')
FIAT_BYPASS_TOR_MSG = _('Bypass Tor proxy for Fiat rates loading')
def __init__(self, config: SimpleConfig=None):
global INSTANCE
INSTANCE = self
Logger.__init__(self)
self.asyncio_loop = asyncio.get_event_loop()
assert self.asyncio_loop.is_running(), "event loop not running"
self._loop_thread = None # type: threading.Thread # set by caller; only used for sanity checks
if config is None:
config = {} # Do not use mutables as default values!
self.config = SimpleConfig(config) if isinstance(config, dict) else config # type: SimpleConfig
# Autodetect and enable Tor proxy on Network init
if self.config.get('tor_auto_on', True):
tor_detected = self.detect_tor_proxy()
if tor_detected:
self.config.set_key('proxy', tor_detected, False)
blockchain.read_blockchains(self.config)
self.logger.info(f"blockchains {list(map(lambda b: b.forkpoint, blockchain.blockchains.values()))}")
self._blockchain_preferred_block = self.config.get('blockchain_preferred_block', None) # type: Optional[Dict]
self._blockchain = blockchain.get_best_chain()
# Server for addresses and transactions
self.default_server = self.config.get('server', None)
# Sanitize default server
if self.default_server:
try:
deserialize_server(self.default_server)
except:
self.logger.warning('failed to parse server-string; falling back to random.')
self.default_server = None
if not self.default_server:
self.default_server = pick_random_server()
self.main_taskgroup = None # type: TaskGroup
# locks
self.restart_lock = asyncio.Lock()
self.bhi_lock = asyncio.Lock()
self.callback_lock = threading.Lock()
self.recent_servers_lock = threading.RLock() # <- re-entrant
self.interfaces_lock = threading.Lock() # for mutating/iterating self.interfaces
# protx code locks
self.protx_info_resp_lock = threading.Lock()
self.server_peers = {} # returned by interface (servers that the main interface knows about)
self.recent_servers = self._read_recent_servers() # note: needs self.recent_servers_lock
self.banner = ''
self.donation_address = ''
self.relay_fee = None # type: Optional[int]
# List of all proposals on the network.
self.all_proposals = []
# callbacks set by the GUI
self.callbacks = defaultdict(list) # note: needs self.callback_lock
dir_path = os.path.join(self.config.path, 'certs')
util.make_dir(dir_path)
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# the main server we are currently communicating with
self.interface = None # type: Interface
# set of servers we have an ongoing connection with
self.interfaces = {} # type: Dict[str, Interface]
self.auto_connect = self.config.get('auto_connect', True)
self.connecting = set()
self.server_queue = None
self.proxy = None
# Dump network messages (all interfaces). Set at runtime from the console.
self.debug = False
# protx info responses data
self.protx_info_resp = []
# create AxeNet
self.axe_net = AxeNet(self, config)
# create MNList instance
from .protx_list import MNList
self.mn_list = MNList(self, config)
self._set_status('disconnected')
def run_from_another_thread(self, coro):
assert self._loop_thread != threading.current_thread(), 'must not be called from network thread'
fut = asyncio.run_coroutine_threadsafe(coro, self.asyncio_loop)
return fut.result()
@staticmethod
def get_instance() -> Optional["Network"]:
return INSTANCE
def with_recent_servers_lock(func):
def func_wrapper(self, *args, **kwargs):
with self.recent_servers_lock:
return func(self, *args, **kwargs)
return func_wrapper
def register_callback(self, callback, events):
with self.callback_lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.callback_lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.callback_lock:
callbacks = self.callbacks[event][:]
for callback in callbacks:
# FIXME: if callback throws, we will lose the traceback
if asyncio.iscoroutinefunction(callback):
asyncio.run_coroutine_threadsafe(callback(event, *args), self.asyncio_loop)
else:
self.asyncio_loop.call_soon_threadsafe(callback, event, *args)
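    # Illustrative callback usage (made-up names): a GUI typically calls
    #     network.register_callback(self.on_network, ['status', 'fee'])
    # and its on_network(event, *args) is then invoked, or scheduled on the
    # asyncio loop if it is a coroutine function, each time trigger_callback()
    # fires for one of those events.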
def _read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r", encoding='utf-8') as f:
data = f.read()
return json.loads(data)
except:
return []
@with_recent_servers_lock
def _save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
except:
pass
def get_server_height(self):
interface = self.interface
return interface.tip if interface else 0
async def _server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.logger.info('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.logger.info(f'{self.default_server} is lagging ({sh} vs {lh})')
return result
def _set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
interface = self.interface
return interface is not None and interface.ready.done()
def is_connecting(self):
return self.connection_status == 'connecting'
async def _request_server_info(self, interface):
await interface.ready
session = interface.session
async def get_banner():
self.banner = await session.send_request('server.banner')
self.notify('banner')
async def get_donation_address():
addr = await session.send_request('server.donation_address')
if not bitcoin.is_address(addr):
if addr: # ignore empty string
self.logger.info(f"invalid donation address from server: {repr(addr)}")
addr = ''
self.donation_address = addr
async def get_server_peers():
server_peers = await session.send_request('server.peers.subscribe')
random.shuffle(server_peers)
max_accepted_peers = len(constants.net.DEFAULT_SERVERS) + NUM_RECENT_SERVERS
server_peers = server_peers[:max_accepted_peers]
self.server_peers = parse_servers(server_peers)
self.notify('servers')
async def get_relay_fee():
relayfee = await session.send_request('blockchain.relayfee')
if relayfee is None:
self.relay_fee = None
else:
relayfee = int(relayfee * COIN)
self.relay_fee = max(0, relayfee)
async with TaskGroup() as group:
await group.spawn(get_banner)
await group.spawn(get_donation_address)
await group.spawn(get_server_peers)
await group.spawn(get_relay_fee)
await group.spawn(self._request_fee_estimates(interface))
async def _request_fee_estimates(self, interface):
session = interface.session
from .simple_config import FEE_ETA_TARGETS
self.config.requested_fee_estimates()
async with TaskGroup() as group:
histogram_task = await group.spawn(session.send_request('mempool.get_fee_histogram'))
fee_tasks = []
for i in FEE_ETA_TARGETS:
fee_tasks.append((i, await group.spawn(session.send_request('blockchain.estimatefee', [i]))))
self.config.mempool_fees = histogram = histogram_task.result()
self.logger.info(f'fee_histogram {histogram}')
self.notify('fee_histogram')
fee_estimates_eta = {}
for nblock_target, task in fee_tasks:
fee = int(task.result() * COIN)
fee_estimates_eta[nblock_target] = fee
if fee < 0: continue
self.config.update_fee_estimates(nblock_target, fee)
self.logger.info(f'fee_estimates {fee_estimates_eta}')
self.notify('fee')
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'fee_histogram':
value = self.config.mempool_fees
elif key == 'servers':
value = self.get_servers()
elif key == 'protx-info':
with self.protx_info_resp_lock:
if self.protx_info_resp:
value = self.protx_info_resp.pop()
else:
value = {}
else:
raise Exception('unexpected trigger key {}'.format(key))
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self) -> NetworkParameters:
host, port, protocol = deserialize_server(self.default_server)
return NetworkParameters(host=host,
port=port,
protocol=protocol,
proxy=self.proxy,
auto_connect=self.auto_connect,
oneserver=self.oneserver)
def get_donation_address(self):
if self.is_connected():
return self.donation_address
def get_interfaces(self) -> List[str]:
"""The list of servers for the connected interfaces."""
with self.interfaces_lock:
return list(self.interfaces)
@with_recent_servers_lock
def get_servers(self):
# note: the order of sources when adding servers here is crucial!
# don't let "server_peers" overwrite anything,
# otherwise the main server could eclipse the client
out = dict()
# add servers received from main interface
server_peers = self.server_peers
if server_peers:
out.update(filter_version(server_peers.copy()))
# hardcoded servers
out.update(constants.net.DEFAULT_SERVERS)
# add recent servers
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host in out:
out[host].update({protocol: port})
else:
out[host] = {protocol: port}
# potentially filter out some
if self.config.get('noonion'):
out = filter_noonion(out)
return out
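# Illustrative sketch (not part of the original module): get_servers() returns a
# mapping of hostname -> {protocol-letter: port} merged from the main interface's
# peers, the hardcoded defaults and the recent-servers file, e.g.
#
#   servers = network.get_servers()
#   # {'electrum.example.org': {'s': '50002', 't': '50001'}, ...}
#
# The protocol letters 's' (SSL) and 't' (TCP) and the hostname above are
# assumptions used purely for illustration.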
def _start_interface(self, server: str):
if server not in self.interfaces and server not in self.connecting:
if server == self.default_server:
self.logger.info(f"connecting to {server} as new interface")
self._set_status('connecting')
self.connecting.add(server)
self.server_queue.put(server)
def _start_random_interface(self):
with self.interfaces_lock:
exclude_set = self.disconnected_servers | set(self.interfaces) | self.connecting
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self._start_interface(server)
return server
def _set_proxy(self, proxy: Optional[dict]):
self.proxy = proxy
# Store these somewhere so we can un-monkey-patch
if not hasattr(socket, "_getaddrinfo"):
socket._getaddrinfo = socket.getaddrinfo
if proxy:
self.logger.info(f'setting proxy {proxy}')
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
if sys.platform == 'win32':
# On Windows, socket.getaddrinfo takes a mutex, and might hold it for up to 10 seconds
# when dns-resolving. To speed it up drastically, we resolve dns ourselves, outside that lock.
# see #4421
socket.getaddrinfo = self._fast_getaddrinfo
else:
socket.getaddrinfo = socket._getaddrinfo
self.trigger_callback('proxy_set', self.proxy)
@staticmethod
def _fast_getaddrinfo(host, *args, **kwargs):
def needs_dns_resolving(host):
try:
ipaddress.ip_address(host)
return False # already valid IP
except ValueError:
pass # not an IP
if str(host) in ('localhost', 'localhost.',):
return False
return True
def resolve_with_dnspython(host):
addrs = []
# try IPv6
try:
answers = dns.resolver.query(host, dns.rdatatype.AAAA)
addrs += [str(answer) for answer in answers]
except dns.exception.DNSException as e:
pass
except BaseException as e:
_logger.info(f'dnspython failed to resolve dns (AAAA) with error: {e}')
# try IPv4
try:
answers = dns.resolver.query(host, dns.rdatatype.A)
addrs += [str(answer) for answer in answers]
except dns.exception.DNSException as e:
# DNS failed for some reason, e.g. dns.resolver.NXDOMAIN; this is normal.
# Simply report back the failure, unless we already have some results.
if not addrs:
raise socket.gaierror(11001, 'getaddrinfo failed') from e
except BaseException as e:
# Possibly internal error in dnspython :( see #4483
_logger.info(f'dnspython failed to resolve dns (A) with error: {e}')
if addrs:
return addrs
# Fall back to original socket.getaddrinfo to resolve dns.
return [host]
addrs = [host]
if needs_dns_resolving(host):
addrs = resolve_with_dnspython(host)
list_of_list_of_socketinfos = [socket._getaddrinfo(addr, *args, **kwargs) for addr in addrs]
list_of_socketinfos = [item for lst in list_of_list_of_socketinfos for item in lst]
return list_of_socketinfos
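# Sketch of the intended effect (the hostname below is a made-up example): when no
# proxy is configured on Windows, socket.getaddrinfo is replaced by _fast_getaddrinfo,
# so a lookup such as
#
#   socket.getaddrinfo('electrum.example.org', 50002)
#
# first resolves A/AAAA records with dnspython outside the getaddrinfo lock, and only
# then calls the saved socket._getaddrinfo on the already-resolved addresses.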
@log_exceptions
async def set_parameters(self, net_params: NetworkParameters):
proxy = net_params.proxy
proxy_str = serialize_proxy(proxy)
host, port, protocol = net_params.host, net_params.port, net_params.protocol
server_str = serialize_server(host, port, protocol)
# sanitize parameters
try:
deserialize_server(serialize_server(host, port, protocol))
if proxy:
proxy_modes.index(proxy['mode']) + 1
int(proxy['port'])
except:
return
self.config.set_key('auto_connect', net_params.auto_connect, False)
self.config.set_key('oneserver', net_params.oneserver, False)
self.config.set_key('proxy', proxy_str, False)
self.config.set_key('server', server_str, True)
# abort if changes were not allowed by config
if self.config.get('server') != server_str \
or self.config.get('proxy') != proxy_str \
or self.config.get('oneserver') != net_params.oneserver:
return
async with self.restart_lock:
self.auto_connect = net_params.auto_connect
if self.proxy != proxy or self.protocol != protocol or self.oneserver != net_params.oneserver:
# Restart the network defaulting to the given server
await self._stop()
self.default_server = server_str
await self._start()
elif self.default_server != server_str:
await self.switch_to_interface(server_str)
else:
await self.switch_lagging_interface()
await self.axe_net.set_parameters()
@log_exceptions
async def restart(self):
async with self.restart_lock:
await self._stop()
await self._start()
def _set_oneserver(self, oneserver: bool):
self.num_server = NUM_TARGET_CONNECTED_SERVERS if not oneserver else 0
self.oneserver = bool(oneserver)
async def _switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
await self.switch_to_interface(random.choice(servers))
async def switch_lagging_interface(self):
'''If auto_connect and lagging, switch interface'''
if self.auto_connect and await self._server_is_lagging():
# switch to one that has the correct header (not height)
best_header = self.blockchain().read_header(self.get_local_height())
with self.interfaces_lock: interfaces = list(self.interfaces.values())
filtered = list(filter(lambda iface: iface.tip_header == best_header, interfaces))
if filtered:
chosen_iface = random.choice(filtered)
await self.switch_to_interface(chosen_iface.server)
async def switch_unwanted_fork_interface(self):
"""If auto_connect and main interface is not on preferred fork,
try to switch to preferred fork.
"""
if not self.auto_connect or not self.interface:
return
with self.interfaces_lock: interfaces = list(self.interfaces.values())
# try to switch to preferred fork
if self._blockchain_preferred_block:
pref_height = self._blockchain_preferred_block['height']
pref_hash = self._blockchain_preferred_block['hash']
if self.interface.blockchain.check_hash(pref_height, pref_hash):
return # already on preferred fork
filtered = list(filter(lambda iface: iface.blockchain.check_hash(pref_height, pref_hash),
interfaces))
if filtered:
self.logger.info("switching to preferred fork")
chosen_iface = random.choice(filtered)
await self.switch_to_interface(chosen_iface.server)
return
else:
self.logger.info("tried to switch to preferred fork but no interfaces are on it")
# try to switch to best chain
if self.blockchain().parent is None:
return # already on best chain
filtered = list(filter(lambda iface: iface.blockchain.parent is None,
interfaces))
if filtered:
self.logger.info("switching to best chain")
chosen_iface = random.choice(filtered)
await self.switch_to_interface(chosen_iface.server)
else:
# FIXME switch to best available?
self.logger.info("tried to switch to best chain but no interfaces are on it")
async def switch_to_interface(self, server: str):
"""Switch to server as our main interface. If no connection exists,
queue interface to be started. The actual switch will
happen when the interface becomes ready.
"""
self.default_server = server
old_interface = self.interface
old_server = old_interface.server if old_interface else None
# Stop any current interface in order to terminate subscriptions,
# and to cancel tasks in interface.group.
# However, for headers sub, give preference to this interface
# over unknown ones, i.e. start it again right away.
if old_server and old_server != server:
await self._close_interface(old_interface)
if len(self.interfaces) <= self.num_server:
self._start_interface(old_server)
if server not in self.interfaces:
self.interface = None
self._start_interface(server)
return
i = self.interfaces[server]
if old_interface != i:
self.logger.info(f"switching to {server}")
blockchain_updated = i.blockchain != self.blockchain()
self.interface = i
await i.group.spawn(self._request_server_info(i))
self.trigger_callback('default_server_changed')
self._set_status('connected')
self.trigger_callback('network_updated')
if blockchain_updated: self.trigger_callback('blockchain_updated')
async def _close_interface(self, interface):
if interface:
with self.interfaces_lock:
if self.interfaces.get(interface.server) == interface:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
await interface.close()
@with_recent_servers_lock
def _add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[:NUM_RECENT_SERVERS]
self._save_recent_servers()
async def connection_down(self, interface: Interface):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
if not interface: return
server = interface.server
self.disconnected_servers.add(server)
if server == self.default_server:
self._set_status('disconnected')
await self._close_interface(interface)
self.trigger_callback('network_updated')
def get_network_timeout_seconds(self, request_type=NetworkTimeout.Generic) -> int:
if self.oneserver and not self.auto_connect:
return request_type.MOST_RELAXED
if self.proxy:
return request_type.RELAXED
return request_type.NORMAL
@ignore_exceptions # do not kill main_taskgroup
@log_exceptions
async def _run_new_interface(self, server):
interface = Interface(self, server, self.proxy)
# note: using longer timeouts here as DNS can sometimes be slow!
timeout = self.get_network_timeout_seconds(NetworkTimeout.Generic)
try:
await asyncio.wait_for(interface.ready, timeout)
except BaseException as e:
self.logger.info(f"couldn't launch iface {server} -- {repr(e)}")
await interface.close()
return
else:
with self.interfaces_lock:
assert server not in self.interfaces
self.interfaces[server] = interface
finally:
try: self.connecting.remove(server)
except KeyError: pass
if server == self.default_server:
await self.switch_to_interface(server)
self._add_recent_server(server)
self.trigger_callback('network_updated')
def check_interface_against_healthy_spread_of_connected_servers(self, iface_to_check) -> bool:
# main interface is exempt. this makes switching servers easier
if iface_to_check.is_main_server():
return True
if not iface_to_check.bucket_based_on_ipaddress():
return True
# bucket connected interfaces
with self.interfaces_lock:
interfaces = list(self.interfaces.values())
if iface_to_check in interfaces:
interfaces.remove(iface_to_check)
buckets = defaultdict(list)
for iface in interfaces:
buckets[iface.bucket_based_on_ipaddress()].append(iface)
# check proposed server against buckets
onion_servers = buckets[BUCKET_NAME_OF_ONION_SERVERS]
if iface_to_check.is_tor():
# keep number of onion servers below half of all connected servers
if len(onion_servers) > NUM_TARGET_CONNECTED_SERVERS // 2:
return False
else:
bucket = iface_to_check.bucket_based_on_ipaddress()
if len(buckets[bucket]) > 0:
return False
return True
async def _init_headers_file(self):
b = blockchain.get_best_chain()
filename = b.path()
len_checkpoints = len(constants.net.CHECKPOINTS)
length = HEADER_SIZE * len_checkpoints * CHUNK_SIZE
if not os.path.exists(filename) or os.path.getsize(filename) < length:
with open(filename, 'wb') as f:
for i in range(len_checkpoints):
for height, header_data in b.checkpoints[i][2]:
f.seek(height*80)
bin_header = util.bfh(header_data)
f.write(bin_header)
util.ensure_sparse_file(filename)
with b.lock:
b.update_size()
def best_effort_reliable(func):
async def make_reliable_wrapper(self, *args, **kwargs):
for i in range(10):
iface = self.interface
# retry until there is a main interface
if not iface:
await asyncio.sleep(0.1)
continue # try again
# wait for it to be usable
iface_ready = iface.ready
iface_disconnected = iface.got_disconnected
await asyncio.wait([iface_ready, iface_disconnected], return_when=asyncio.FIRST_COMPLETED)
if not iface_ready.done() or iface_ready.cancelled():
await asyncio.sleep(0.1)
continue # try again
# try actual request
success_fut = asyncio.ensure_future(func(self, *args, **kwargs))
await asyncio.wait([success_fut, iface_disconnected], return_when=asyncio.FIRST_COMPLETED)
if success_fut.done() and not success_fut.cancelled():
if success_fut.exception():
try:
raise success_fut.exception()
except RequestTimedOut:
await iface.close()
await iface_disconnected
continue # try again
return success_fut.result()
# otherwise, try again
raise BestEffortRequestFailed('no interface to do request on... gave up.')
return make_reliable_wrapper
def catch_server_exceptions(func):
async def wrapper(self, *args, **kwargs):
try:
return await func(self, *args, **kwargs)
except aiorpcx.jsonrpc.CodeMessageError as e:
raise UntrustedServerReturnedError(original_exception=e) from e
return wrapper
@best_effort_reliable
@catch_server_exceptions
async def get_merkle_for_transaction(self, tx_hash: str, tx_height: int) -> dict:
if not is_hash256_str(tx_hash):
raise Exception(f"{repr(tx_hash)} is not a txid")
if not is_non_negative_integer(tx_height):
raise Exception(f"{repr(tx_height)} is not a block height")
return await self.interface.session.send_request('blockchain.transaction.get_merkle', [tx_hash, tx_height])
@best_effort_reliable
async def broadcast_transaction(self, tx, *, timeout=None) -> None:
if timeout is None:
timeout = self.get_network_timeout_seconds(NetworkTimeout.Urgent)
try:
out = await self.interface.session.send_request('blockchain.transaction.broadcast', [str(tx)], timeout=timeout)
# note: both 'out' and exception messages are untrusted input from the server
except (RequestTimedOut, asyncio.CancelledError, asyncio.TimeoutError):
raise # pass-through
except aiorpcx.jsonrpc.CodeMessageError as e:
self.logger.info(f"broadcast_transaction error [DO NOT TRUST THIS MESSAGE]: {repr(e)}")
raise TxBroadcastServerReturnedError(self.sanitize_tx_broadcast_response(e.message)) from e
except BaseException as e: # intentional BaseException for sanity!
self.logger.info(f"broadcast_transaction error2 [DO NOT TRUST THIS MESSAGE]: {repr(e)}")
send_exception_to_crash_reporter(e)
raise TxBroadcastUnknownError() from e
if out != tx.txid():
self.logger.info(f"unexpected txid for broadcast_transaction [DO NOT TRUST THIS MESSAGE]: {out} != {tx.txid()}")
raise TxBroadcastHashMismatch(_("Server returned unexpected transaction ID."))
@staticmethod
def sanitize_tx_broadcast_response(server_msg) -> str:
# Unfortunately, bitcoind, and hence the Electrum protocol, does not return a useful error code.
# So we use substring matching to grok the error message.
# server_msg is untrusted input so it should not be shown to the user. see #4968
server_msg = str(server_msg)
server_msg = server_msg.replace("\n", r"\n")
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/policy/policy.cpp
# grep "reason ="
policy_error_messages = {
r"version": _("Transaction uses non-standard version."),
r"tx-size": _("The transaction was rejected because it is too large (in bytes)."),
r"scriptsig-size": None,
r"scriptsig-not-pushonly": None,
r"scriptpubkey": None,
r"bare-multisig": None,
r"dust": _("Transaction could not be broadcast due to dust outputs."),
r"multi-op-return": _("The transaction was rejected because it contains multiple OP_RETURN outputs."),
}
for substring in policy_error_messages:
if substring in server_msg:
msg = policy_error_messages[substring]
return msg if msg else substring
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/script/script_error.cpp
script_error_messages = {
r"Script evaluated without error but finished with a false/empty top stack element",
r"Script failed an OP_VERIFY operation",
r"Script failed an OP_EQUALVERIFY operation",
r"Script failed an OP_CHECKMULTISIGVERIFY operation",
r"Script failed an OP_CHECKSIGVERIFY operation",
r"Script failed an OP_NUMEQUALVERIFY operation",
r"Script is too big",
r"Push value size limit exceeded",
r"Operation limit exceeded",
r"Stack size limit exceeded",
r"Signature count negative or greater than pubkey count",
r"Pubkey count negative or limit exceeded",
r"Opcode missing or not understood",
r"Attempted to use a disabled opcode",
r"Operation not valid with the current stack size",
r"Operation not valid with the current altstack size",
r"OP_RETURN was encountered",
r"Invalid OP_IF construction",
r"Negative locktime",
r"Locktime requirement not satisfied",
r"Signature hash type missing or not understood",
r"Non-canonical DER signature",
r"Data push larger than necessary",
r"Only non-push operators allowed in signatures",
r"Non-canonical signature: S value is unnecessarily high",
r"Dummy CHECKMULTISIG argument must be zero",
r"OP_IF/NOTIF argument must be minimal",
r"Signature must be zero for failed CHECK(MULTI)SIG operation",
r"NOPx reserved for soft-fork upgrades",
r"Public key is neither compressed or uncompressed",
r"Extra items left on stack after execution",
r"Signature is found in scriptCode",
}
for substring in script_error_messages:
if substring in server_msg:
return substring
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/validation.cpp
# grep "REJECT_"
# should come after script_error.cpp (due to e.g. non-mandatory-script-verify-flag)
validation_error_messages = {
r"coinbase",
r"tx-size-small",
r"non-final",
r"txn-already-in-mempool",
r"txn-mempool-conflict",
r"txn-already-known",
r"non-BIP68-final",
r"bad-txns-nonstandard-inputs",
r"bad-txns-too-many-sigops",
r"mempool min fee not met",
r"min relay fee not met",
r"absurdly-high-fee",
r"too-long-mempool-chain",
r"bad-txns-spends-conflicting-tx",
r"insufficient fee",
r"too many potential replacements",
r"replacement-adds-unconfirmed",
r"mempool full",
r"non-mandatory-script-verify-flag",
r"mandatory-script-verify-flag-failed",
}
for substring in validation_error_messages:
if substring in server_msg:
return substring
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/rpc/rawtransaction.cpp
# grep "RPC_TRANSACTION"
# grep "RPC_DESERIALIZATION_ERROR"
# https://github.com/bitcoin/bitcoin/blob/d7d7d315060620446bd363ca50f95f79d3260db7/src/util/error.cpp
rawtransaction_error_messages = {
r"Missing inputs",
r"transaction already in block chain",
r"Transaction already in block chain",
r"TX decode failed",
r"Peer-to-peer functionality missing or disabled",
r"Transaction rejected by AcceptToMemoryPool",
r"AcceptToMemoryPool failed",
}
for substring in rawtransaction_error_messages:
if substring in server_msg:
return substring
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/consensus/tx_verify.cpp
# grep "REJECT_"
tx_verify_error_messages = {
r"bad-txns-vin-empty",
r"bad-txns-vout-empty",
r"bad-txns-oversize",
r"bad-txns-vout-negative",
r"bad-txns-vout-toolarge",
r"bad-txns-txouttotal-toolarge",
r"bad-txns-inputs-duplicate",
r"bad-cb-length",
r"bad-txns-prevout-null",
r"bad-txns-inputs-missingorspent",
r"bad-txns-premature-spend-of-coinbase",
r"bad-txns-inputvalues-outofrange",
r"bad-txns-in-belowout",
r"bad-txns-fee-outofrange",
}
for substring in tx_verify_error_messages:
if substring in server_msg:
return substring
# Axed v0.13.1 specific errors
axed_specific_error_messages = {
r"bad-qc-not-allowed",
r"bad-qc-missing",
r"bad-qc-block",
r"bad-qc-invalid-null",
r"bad-qc-dup",
r"bad-qc-height",
r"bad-qc-invalid",
r"bad-tx-payload",
r"bad-qc-dup",
r"bad-qc-premature",
r"bad-qc-version",
r"bad-qc-quorum-hash",
r"bad-qc-type",
r"bad-qc-payload",
r"commitment-not-found",
r"excess-quorums",
r"bad-protx-addr",
r"bad-protx-ipaddr",
r"bad-protx-addr-port",
r"bad-protx-ipaddr-port",
r"bad-protx-sig",
r"bad-protx-inputs-hash",
r"bad-protx-type",
r"bad-protx-payload",
r"bad-protx-version",
r"bad-protx-mode",
r"bad-protx-key-null",
r"bad-protx-payee",
r"bad-protx-payee-dest",
r"bad-protx-payee-reuse",
r"bad-protx-operator-reward",
r"bad-protx-collateral",
r"bad-protx-collateral-dest",
r"bad-protx-collateral-pkh",
r"bad-protx-collateral-index",
r"bad-protx-collateral-reuse",
r"bad-protx-dup-addr",
r"bad-protx-dup-key",
r"bad-protx-key-not-same",
r"bad-protx-hash",
r"bad-protx-operator-payee",
r"bad-protx-reason",
r"bad-tx-type",
r"bad-tx-type-check",
r"bad-tx-type-proc",
r"failed-check-special-tx",
r"bad-cbtx-type",
r"bad-cbtx-invalid",
r"bad-cbtx-payload",
r"bad-cbtx-version",
r"bad-cbtx-height",
r"bad-cbtx-mnmerkleroot",
r"failed-calc-cb-mnmerkleroot",
r"failed-dmn-block",
r"bad-txns-payload-oversize",
r"bad-txns-type",
r"bad-txns-cb-type",
r"qc-not-allowed",
r"bad-txlockrequest",
r"tx-txlock-conflict",
r"tx-txlockreq-mempool-conflict",
r"txlockreq-tx-mempool-conflict",
r"protx-dup",
r"mempool min fee not met",
r"insufficient priority",
r"rate limited free transaction",
r"bad-txns-fee-negative",
r"bad-txns-BIP30",
r"bad-sb-start",
r"bad-blk-sigops",
r"bad-txns-nonfinal",
r"bad-cb-amount",
r"bad-cb-payee",
r"high-hash",
r"devnet-genesis",
r"bad-txnmrklroot",
r"bad-txns-duplicate",
r"bad-blk-length",
r"bad-cb-missing",
r"bad-cb-multiple",
r"conflict-tx-lock",
r"forked chain older than last checkpoint",
r"incorrect proof of work (DGW pre-fd-diffbitsork)",
r"bad-diffbits",
r"time-too-old",
r"time-too-new",
r"bad-cb-height",
r"bad-cb-type",
r"bad-prevblk",
r"Inputs unavailable",
r"Transaction check failed",
r"bad-version",
}
for substring in axed_specific_error_messages:
if substring in server_msg:
return substring
# otherwise:
return _("Unknown error")
@best_effort_reliable
@catch_server_exceptions
async def request_chunk(self, height: int, tip=None, *, can_return_early=False):
if not is_non_negative_integer(height):
raise Exception(f"{repr(height)} is not a block height")
return await self.interface.request_chunk(height, tip=tip, can_return_early=can_return_early)
@best_effort_reliable
@catch_server_exceptions
async def get_transaction(self, tx_hash: str, *, timeout=None) -> str:
if not is_hash256_str(tx_hash):
raise Exception(f"{repr(tx_hash)} is not a txid")
return await self.interface.session.send_request('blockchain.transaction.get', [tx_hash],
timeout=timeout)
@best_effort_reliable
@catch_server_exceptions
async def get_history_for_scripthash(self, sh: str) -> List[dict]:
if not is_hash256_str(sh):
raise Exception(f"{repr(sh)} is not a scripthash")
return await self.interface.session.send_request('blockchain.scripthash.get_history', [sh])
@best_effort_reliable
@catch_server_exceptions
async def listunspent_for_scripthash(self, sh: str) -> List[dict]:
if not is_hash256_str(sh):
raise Exception(f"{repr(sh)} is not a scripthash")
return await self.interface.session.send_request('blockchain.scripthash.listunspent', [sh])
@best_effort_reliable
@catch_server_exceptions
async def get_balance_for_scripthash(self, sh: str) -> dict:
if not is_hash256_str(sh):
raise Exception(f"{repr(sh)} is not a scripthash")
return await self.interface.session.send_request('blockchain.scripthash.get_balance', [sh])
@best_effort_reliable
@catch_server_exceptions
async def request_protx_diff(self, *, timeout=None) -> dict:
mn_list = self.mn_list
base_height = mn_list.protx_height
height = self.get_local_height()
if not height or height <= base_height:
return
activation_height = constants.net.DIP3_ACTIVATION_HEIGHT
if base_height <= 1:
if base_height == 0:  # for protx.diff, the first allowed base height is 1
base_height = 1
if height > activation_height:
height = activation_height + 1
elif height - base_height > CHUNK_SIZE:
height = mn_list.calc_max_height(base_height, height)
try:
params = (base_height, height)
mn_list.sent_protx_diff.put_nowait(params)
except asyncio.QueueFull:
self.logger.info('ignore excess protx diff request')
return
try:
res = None
err = None
s = self.interface.session
res = await s.send_request('protx.diff', params, timeout=timeout)
except asyncio.TimeoutError:
err = f'request_protx_diff(), params={params}: timeout'
except asyncio.CancelledError:
err = f'request_protx_diff(), params={params}: cancelled'
except Exception as e:
err = f'request_protx_diff(), params={params}: {repr(e)}'
self.trigger_callback('protx-diff', {'error': err,
'result': res,
'params': params})
@best_effort_reliable
@catch_server_exceptions
async def request_protx_info(self, protx_hash: str, *, timeout=None):
'''
Request detailed information about a deterministic masternode.
protx_hash: The hash of the initial ProRegTx
'''
if not is_hash256_str(protx_hash):
raise Exception(f"{repr(protx_hash)} is not a txid")
try:
err = None
res = await self.interface.session.send_request('protx.info',
[protx_hash],
timeout=timeout)
except Exception as e:
err = str(e)
res = None
with self.protx_info_resp_lock:
self.protx_info_resp.insert(0, {'error': err,
'result': res})
self.notify('protx-info')
def blockchain(self) -> Blockchain:
interface = self.interface
if interface and interface.blockchain is not None:
self._blockchain = interface.blockchain
return self._blockchain
def get_blockchains(self):
out = {} # blockchain_id -> list(interfaces)
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
with self.interfaces_lock: interfaces_values = list(self.interfaces.values())
for chain_id, bc in blockchain_items:
r = list(filter(lambda i: i.blockchain==bc, interfaces_values))
if r:
out[chain_id] = r
return out
def _set_preferred_chain(self, chain: Blockchain):
height = chain.get_max_forkpoint()
header_hash = chain.get_hash(height)
self._blockchain_preferred_block = {
'height': height,
'hash': header_hash,
}
self.config.set_key('blockchain_preferred_block', self._blockchain_preferred_block)
async def follow_chain_given_id(self, chain_id: str) -> None:
bc = blockchain.blockchains.get(chain_id)
if not bc:
raise Exception('blockchain {} not found'.format(chain_id))
self._set_preferred_chain(bc)
# select server on this chain
with self.interfaces_lock: interfaces = list(self.interfaces.values())
interfaces_on_selected_chain = list(filter(lambda iface: iface.blockchain == bc, interfaces))
if len(interfaces_on_selected_chain) == 0: return
chosen_iface = random.choice(interfaces_on_selected_chain)
# switch to server (and save to config)
net_params = self.get_parameters()
host, port, protocol = deserialize_server(chosen_iface.server)
net_params = net_params._replace(host=host, port=port, protocol=protocol)
await self.set_parameters(net_params)
async def follow_chain_given_server(self, server_str: str) -> None:
# note that server_str should correspond to a connected interface
iface = self.interfaces.get(server_str)
if iface is None:
return
self._set_preferred_chain(iface.blockchain)
# switch to server (and save to config)
net_params = self.get_parameters()
host, port, protocol = deserialize_server(server_str)
net_params = net_params._replace(host=host, port=port, protocol=protocol)
await self.set_parameters(net_params)
def get_local_height(self):
return self.blockchain().height()
def export_checkpoints(self, path):
"""Run manually to generate blockchain checkpoints.
Kept for console use only.
"""
cp = self.blockchain().get_checkpoints()
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(cp, indent=4))
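# Console usage sketch (the path is an arbitrary example, not a project convention):
#
#   network.export_checkpoints('/tmp/checkpoints.json')
#
# writes the current chain's checkpoints to the given file as indented JSON.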
async def _start(self):
assert not self.main_taskgroup
self.main_taskgroup = main_taskgroup = SilentTaskGroup()
assert not self.interface and not self.interfaces
assert not self.connecting and not self.server_queue
self.logger.info('starting network')
self.disconnected_servers = set([])
self.protocol = deserialize_server(self.default_server)[2]
self.server_queue = queue.Queue()
self._set_proxy(deserialize_proxy(self.config.get('proxy')))
self._set_oneserver(self.config.get('oneserver', False))
self._start_interface(self.default_server)
async def main():
try:
await self._init_headers_file()
# note: if a task finishes with CancelledError, that
# will NOT raise, and the group will keep the other tasks running
async with main_taskgroup as group:
await group.spawn(self._maintain_sessions())
await group.spawn(self._gather_protx_info())
[await group.spawn(job) for job in self._jobs]
except Exception as e:
self.logger.exception('')
raise e
asyncio.run_coroutine_threadsafe(main(), self.asyncio_loop)
self.trigger_callback('network_updated')
def start(self, jobs: List=None):
self._jobs = jobs or []
asyncio.run_coroutine_threadsafe(self._start(), self.asyncio_loop)
self.axe_net.start()
self.mn_list.start()
@log_exceptions
async def _stop(self, full_shutdown=False):
self.logger.info("stopping network")
try:
await asyncio.wait_for(self.main_taskgroup.cancel_remaining(), timeout=2)
except (asyncio.TimeoutError, asyncio.CancelledError) as e:
self.logger.info(f"exc during main_taskgroup cancellation: {repr(e)}")
self.main_taskgroup = None # type: TaskGroup
self.interface = None # type: Interface
self.interfaces = {} # type: Dict[str, Interface]
self.connecting.clear()
self.server_queue = None
if not full_shutdown:
self.trigger_callback('network_updated')
def stop(self):
assert self._loop_thread != threading.current_thread(), 'must not be called from network thread'
self.mn_list.stop()
fut = asyncio.run_coroutine_threadsafe(self._stop(full_shutdown=True), self.asyncio_loop)
try:
fut.result(timeout=2)
except (asyncio.TimeoutError, asyncio.CancelledError): pass
self.axe_net.stop()
async def _ensure_there_is_a_main_interface(self):
if self.is_connected():
return
now = time.time()
# if auto_connect is set, try a different server
if self.auto_connect and not self.is_connecting():
await self._switch_to_random_interface()
# if auto_connect is not set, or still no main interface, retry current
if not self.is_connected() and not self.is_connecting():
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
await self.switch_to_interface(self.default_server)
async def _maintain_sessions(self):
async def launch_already_queued_up_new_interfaces():
while self.server_queue.qsize() > 0:
server = self.server_queue.get()
await self.main_taskgroup.spawn(self._run_new_interface(server))
async def maybe_queue_new_interfaces_to_be_launched_later():
now = time.time()
for i in range(self.num_server - len(self.interfaces) - len(self.connecting)):
# FIXME this should try to honour "healthy spread of connected servers"
self._start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.logger.info('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
async def maintain_healthy_spread_of_connected_servers():
with self.interfaces_lock: interfaces = list(self.interfaces.values())
random.shuffle(interfaces)
for iface in interfaces:
if not self.check_interface_against_healthy_spread_of_connected_servers(iface):
self.logger.info(f"disconnecting from {iface.server}. too many connected "
f"servers already in bucket {iface.bucket_based_on_ipaddress()}")
await self._close_interface(iface)
async def maintain_main_interface():
await self._ensure_there_is_a_main_interface()
if self.is_connected():
if self.config.is_fee_estimates_update_required():
await self.interface.group.spawn(self._request_fee_estimates, self.interface)
while True:
try:
await launch_already_queued_up_new_interfaces()
await maybe_queue_new_interfaces_to_be_launched_later()
await maintain_healthy_spread_of_connected_servers()
await maintain_main_interface()
except asyncio.CancelledError:
# suppress spurious cancellations
group = self.main_taskgroup
if not group or group._closed:
raise
await asyncio.sleep(0.1)
async def _gather_protx_info(self):
mn_list = self.mn_list
while mn_list.protx_loading: # start after protx diffs loaded
await asyncio.sleep(1)
loop = self.asyncio_loop
get_hashes = await loop.run_in_executor(None, mn_list.process_info)
last_process_time = time.time()
while True:
if not get_hashes:
await asyncio.sleep(60)
for h in get_hashes:
try:
await self.request_protx_info(h)
except Exception as e:
self.logger.info(f'_gather_protx_info error {str(e)}')
if time.time() - last_process_time > 60:
break
await asyncio.sleep(0.1)
get_hashes = await loop.run_in_executor(None, mn_list.process_info)
last_process_time = time.time()
await asyncio.sleep(0.1)
@classmethod
async def _send_http_on_proxy(cls, method: str, url: str, params: str = None,
body: bytes = None, json: dict = None, headers=None,
on_finish=None, timeout=None):
async def default_on_finish(resp: ClientResponse):
resp.raise_for_status()
return await resp.text()
if headers is None:
headers = {}
if on_finish is None:
on_finish = default_on_finish
network = cls.get_instance()
proxy = network.proxy if network else None
async with make_aiohttp_session(proxy, timeout=timeout) as session:
if method == 'get':
async with session.get(url, params=params, headers=headers) as resp:
return await on_finish(resp)
elif method == 'post':
assert body is not None or json is not None, 'body or json must be supplied if method is post'
if body is not None:
async with session.post(url, data=body, headers=headers) as resp:
return await on_finish(resp)
elif json is not None:
async with session.post(url, json=json, headers=headers) as resp:
return await on_finish(resp)
else:
assert False
@classmethod
def send_http_on_proxy(cls, method, url, **kwargs):
network = cls.get_instance()
if network:
assert network._loop_thread is not threading.currentThread()
loop = network.asyncio_loop
else:
loop = asyncio.get_event_loop()
coro = asyncio.run_coroutine_threadsafe(cls._send_http_on_proxy(method, url, **kwargs), loop)
# note: _send_http_on_proxy has its own timeout, so no timeout here:
return coro.result()
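# Usage sketch (the class name "Network" and the URL are assumptions for illustration):
#
#   text = Network.send_http_on_proxy('get', 'https://example.com/api', timeout=10)
#
# The request is tunnelled through the configured proxy, if any; for 'post' either
# body= (raw bytes) or json= must be supplied, as asserted in _send_http_on_proxy.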
@classmethod
def detect_tor_proxy(cls, proxy=None):
detected = None
tor_ip = '127.0.0.1'
tor_ports = [9050, 9150]
proxies = [('socks5', tor_ip, p) for p in tor_ports]
if proxy:
try:
psplit = proxy.split(':')[:3]
proxies.insert(0, (psplit[0], psplit[1], int(psplit[2])))
except:
pass
if hasattr(socket, "_socketobject"):
s = socket._socketobject(socket.AF_INET, socket.SOCK_STREAM)
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
for p in proxies:
try:
s.connect(p[1:])
# Tor responds uniquely to HTTP-like requests
s.send(b"GET\n")
if b"Tor is not an HTTP Proxy" in s.recv(1024):
detected = p
break
except socket.error:
continue
return "%s:%s:%s::" % detected if detected else None
def proxy_is_tor(self, proxy):
if proxy is None:
return False
if hasattr(socket, "_socketobject"):
s = socket._socketobject(socket.AF_INET, socket.SOCK_STREAM)
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
proxy_host = proxy.get('host', None)
proxy_port = int(proxy.get('port', -1))
if proxy_host is None or proxy_port < 0:
return False
try:
s.settimeout(0.1)
s.connect((proxy_host, proxy_port))
s.send(b"GET\n")
if b"Tor is not an HTTP Proxy" in s.recv(1024):
return True
except socket.error:
return False
return False
# methods used in scripts
async def get_peers(self):
while not self.is_connected():
await asyncio.sleep(1)
session = self.interface.session
return parse_servers(await session.send_request('server.peers.subscribe'))
async def send_multiple_requests(self, servers: List[str], method: str, params: Sequence):
responses = dict()
async def get_response(server):
interface = Interface(self, server, self.proxy)
timeout = self.get_network_timeout_seconds(NetworkTimeout.Urgent)
try:
await asyncio.wait_for(interface.ready, timeout)
except BaseException as e:
await interface.close()
return
try:
res = await interface.session.send_request(method, params, timeout=10)
except Exception as e:
res = e
responses[interface.server] = res
async with TaskGroup() as group:
for server in servers:
await group.spawn(get_response(server))
return responses
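# Usage sketch (server strings and the method are placeholders for illustration):
#
#   results = await network.send_multiple_requests(
#       ['host1.example.com:50002:s', 'host2.example.com:50002:s'],
#       'blockchain.headers.subscribe', [])
#
# Each reachable server contributes one entry mapping its server string to either
# the response or the exception it raised.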
| python | 66,208 |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from cassandra.datastax.cloud import parse_metadata_info
from cassandra.query import SimpleStatement
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import six
from ssl import SSLContext, PROTOCOL_TLSv1
from cassandra import DriverException, ConsistencyLevel, InvalidRequest
from cassandra.cluster import NoHostAvailable, ExecutionProfile, Cluster
from cassandra.connection import SniEndPoint
from cassandra.auth import PlainTextAuthProvider
from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy, ConstantReconnectionPolicy
from mock import patch
from tests.integration import requirescloudproxy
from tests.integration.util import wait_until_not_raised
from tests.integration.advanced.cloud import CloudProxyCluster, CLOUD_PROXY_SERVER
DISALLOWED_CONSISTENCIES = [
ConsistencyLevel.ANY,
ConsistencyLevel.ONE,
ConsistencyLevel.LOCAL_ONE
]
@requirescloudproxy
class CloudTests(CloudProxyCluster):
def hosts_up(self):
return [h for h in self.cluster.metadata.all_hosts() if h.is_up]
def test_resolve_and_connect(self):
self.connect(self.creds)
self.assertEqual(len(self.hosts_up()), 3)
for host in self.cluster.metadata.all_hosts():
self.assertTrue(host.is_up)
self.assertIsInstance(host.endpoint, SniEndPoint)
self.assertEqual(str(host.endpoint), "{}:{}:{}".format(
host.endpoint.address, host.endpoint.port, host.host_id))
self.assertIn(host.endpoint._resolved_address, ("127.0.0.1", '::1'))
def test_match_system_local(self):
self.connect(self.creds)
self.assertEqual(len(self.hosts_up()), 3)
for host in self.cluster.metadata.all_hosts():
row = self.session.execute('SELECT * FROM system.local', host=host).one()
self.assertEqual(row.host_id, host.host_id)
self.assertEqual(row.rpc_address, host.broadcast_rpc_address)
def test_set_auth_provider(self):
self.connect(self.creds)
self.assertIsInstance(self.cluster.auth_provider, PlainTextAuthProvider)
self.assertEqual(self.cluster.auth_provider.username, 'cassandra')
self.assertEqual(self.cluster.auth_provider.password, 'cassandra')
def test_support_leaving_the_auth_unset(self):
with self.assertRaises(NoHostAvailable):
self.connect(self.creds_no_auth)
self.assertIsNone(self.cluster.auth_provider)
def test_support_overriding_auth_provider(self):
try:
self.connect(self.creds, auth_provider=PlainTextAuthProvider('invalid', 'invalid'))
except:
pass # this will fail soon when sni_single_endpoint is updated
self.assertIsInstance(self.cluster.auth_provider, PlainTextAuthProvider)
self.assertEqual(self.cluster.auth_provider.username, 'invalid')
self.assertEqual(self.cluster.auth_provider.password, 'invalid')
def test_error_overriding_ssl_context(self):
with self.assertRaises(ValueError) as cm:
self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLSv1))
self.assertIn('cannot be specified with a cloud configuration', str(cm.exception))
def test_error_overriding_ssl_options(self):
with self.assertRaises(ValueError) as cm:
self.connect(self.creds, ssl_options={'check_hostname': True})
self.assertIn('cannot be specified with a cloud configuration', str(cm.exception))
def _bad_hostname_metadata(self, config, http_data):
config = parse_metadata_info(config, http_data)
config.sni_host = "127.0.0.1"
return config
def test_verify_hostname(self):
with patch('cassandra.datastax.cloud.parse_metadata_info', wraps=self._bad_hostname_metadata):
with self.assertRaises(NoHostAvailable) as e:
self.connect(self.creds)
self.assertIn("hostname", str(e.exception).lower())
def test_error_when_bundle_doesnt_exist(self):
try:
self.connect('/invalid/path/file.zip')
except Exception as e:
if six.PY2:
self.assertIsInstance(e, IOError)
else:
self.assertIsInstance(e, FileNotFoundError)
def test_load_balancing_policy_is_dcawaretokenlbp(self):
self.connect(self.creds)
self.assertIsInstance(self.cluster.profile_manager.default.load_balancing_policy,
TokenAwarePolicy)
self.assertIsInstance(self.cluster.profile_manager.default.load_balancing_policy._child_policy,
DCAwareRoundRobinPolicy)
def test_resolve_and_reconnect_on_node_down(self):
self.connect(self.creds,
idle_heartbeat_interval=1, idle_heartbeat_timeout=1,
reconnection_policy=ConstantReconnectionPolicy(120))
self.assertEqual(len(self.hosts_up()), 3)
CLOUD_PROXY_SERVER.stop_node(1)
wait_until_not_raised(
lambda: self.assertEqual(len(self.hosts_up()), 2),
0.02, 250)
host = [h for h in self.cluster.metadata.all_hosts() if not h.is_up][0]
with patch.object(SniEndPoint, "resolve", wraps=host.endpoint.resolve) as mocked_resolve:
CLOUD_PROXY_SERVER.start_node(1)
wait_until_not_raised(
lambda: self.assertEqual(len(self.hosts_up()), 3),
0.02, 250)
mocked_resolve.assert_called_once()
def test_metadata_unreachable(self):
with self.assertRaises(DriverException) as cm:
self.connect(self.creds_unreachable, connect_timeout=1)
self.assertIn('Unable to connect to the metadata service', str(cm.exception))
def test_metadata_ssl_error(self):
with self.assertRaises(DriverException) as cm:
self.connect(self.creds_invalid_ca)
self.assertIn('Unable to connect to the metadata', str(cm.exception))
def test_default_consistency(self):
self.connect(self.creds)
self.assertEqual(self.session.default_consistency_level, ConsistencyLevel.LOCAL_QUORUM)
self.assertEqual(self.cluster.profile_manager.default.consistency_level, ConsistencyLevel.LOCAL_QUORUM)
def test_default_consistency_of_execution_profiles(self):
cloud_config = {'secure_connect_bundle': self.creds}
self.cluster = Cluster(cloud=cloud_config, protocol_version=4, execution_profiles={
'pre_create_default_ep': ExecutionProfile(),
'pre_create_changed_ep': ExecutionProfile(
consistency_level=ConsistencyLevel.LOCAL_ONE,
),
})
self.cluster.add_execution_profile('pre_connect_default_ep', ExecutionProfile())
self.cluster.add_execution_profile(
'pre_connect_changed_ep',
ExecutionProfile(
consistency_level=ConsistencyLevel.LOCAL_ONE,
)
)
session = self.cluster.connect(wait_for_all_pools=True)
self.cluster.add_execution_profile('post_connect_default_ep', ExecutionProfile())
self.cluster.add_execution_profile(
'post_connect_changed_ep',
ExecutionProfile(
consistency_level=ConsistencyLevel.LOCAL_ONE,
)
)
for default in ['pre_create_default_ep', 'pre_connect_default_ep', 'post_connect_default_ep']:
cl = self.cluster.profile_manager.profiles[default].consistency_level
self.assertEqual(
cl, ConsistencyLevel.LOCAL_QUORUM,
"Expecting LOCAL QUORUM for profile {}, but got {} instead".format(default, cl)
)
for changed in ['pre_create_changed_ep', 'pre_connect_changed_ep', 'post_connect_changed_ep']:
cl = self.cluster.profile_manager.profiles[changed].consistency_level
self.assertEqual(
cl, ConsistencyLevel.LOCAL_ONE,
"Expecting LOCAL ONE for profile {}, but got {} instead".format(default, cl)
)
def test_consistency_guardrails(self):
self.connect(self.creds)
self.session.execute(
"CREATE KEYSPACE IF NOT EXISTS test_consistency_guardrails "
"with replication={'class': 'SimpleStrategy', 'replication_factor': 1}"
)
self.session.execute("CREATE TABLE IF NOT EXISTS test_consistency_guardrails.guardrails (id int primary key)")
for consistency in DISALLOWED_CONSISTENCIES:
statement = SimpleStatement(
"INSERT INTO test_consistency_guardrails.guardrails (id) values (1)",
consistency_level=consistency
)
with self.assertRaises(InvalidRequest) as e:
self.session.execute(statement)
self.assertIn('not allowed for Write Consistency Level', str(e.exception))
# Sanity check to make sure we can do a normal insert
statement = SimpleStatement(
"INSERT INTO test_consistency_guardrails.guardrails (id) values (1)",
consistency_level=ConsistencyLevel.LOCAL_QUORUM
)
try:
self.session.execute(statement)
except InvalidRequest:
self.fail("InvalidRequest was incorrectly raised for write query at LOCAL QUORUM!")
| python | 9,894 |
import os
from django.conf import settings
from django.contrib.gis.db.models.functions import Transform
from rest_framework import serializers
from rest_framework_gis import serializers as geo_serializers
from geotrek.api.mobile.serializers.tourism import InformationDeskSerializer
from geotrek.api.v2.functions import StartPoint, EndPoint
from geotrek.zoning.models import City, District
if 'geotrek.trekking' in settings.INSTALLED_APPS:
from geotrek.trekking import models as trekking_models
class POIListSerializer(geo_serializers.GeoFeatureModelSerializer):
pictures = serializers.SerializerMethodField()
geometry = geo_serializers.GeometryField(read_only=True, precision=7, source='geom2d_transformed')
type = serializers.ReadOnlyField(source='type.pk')
def get_pictures(self, obj):
if not obj.resized_pictures:
return []
root_pk = self.context.get('root_pk') or obj.pk
return obj.serializable_pictures_mobile(root_pk)
class Meta:
model = trekking_models.POI
id_field = 'pk'
geo_field = 'geometry'
fields = (
'id', 'pk', 'pictures', 'name', 'description', 'type', 'geometry',
)
class TrekBaseSerializer(geo_serializers.GeoFeatureModelSerializer):
cities = serializers.SerializerMethodField()
districts = serializers.SerializerMethodField()
length = serializers.FloatField(source='length_2d_display')
departure_city = serializers.SerializerMethodField()
def get_cities(self, obj):
qs = City.objects.filter(published=True)
cities = qs.filter(geom__intersects=(obj.geom, 0))
return cities.values_list('code', flat=True)
def get_departure_city(self, obj):
qs = City.objects.filter(published=True)
if obj.start_point:
city = qs.filter(geom__covers=(obj.start_point, 0)).first()
if city:
return city.code
return None
def get_districts(self, obj):
qs = District.objects.filter(published=True)
districts = qs.filter(geom__intersects=(obj.geom, 0))
return [district.pk for district in districts]
class Meta:
model = trekking_models.Trek
id_field = 'pk'
geo_field = 'geometry'
class TrekListSerializer(TrekBaseSerializer):
first_picture = serializers.SerializerMethodField()
geometry = geo_serializers.GeometryField(read_only=True, precision=7, source='start_point', )
def get_first_picture(self, obj):
root_pk = self.context.get('root_pk') or obj.pk
return obj.resized_picture_mobile(root_pk)
class Meta(TrekBaseSerializer.Meta):
fields = (
'id', 'pk', 'first_picture', 'name', 'departure', 'accessibilities', 'route', 'departure_city',
'difficulty', 'practice', 'themes', 'length', 'geometry', 'districts', 'cities', 'duration', 'ascent',
'descent',
)
class TrekDetailSerializer(TrekBaseSerializer):
geometry = geo_serializers.GeometryField(read_only=True, precision=7, source='geom2d_transformed')
pictures = serializers.SerializerMethodField()
arrival_city = serializers.SerializerMethodField()
information_desks = serializers.SerializerMethodField()
parking_location = serializers.SerializerMethodField()
profile = serializers.SerializerMethodField()
points_reference = serializers.SerializerMethodField()
children = serializers.SerializerMethodField()
def get_pictures(self, obj):
root_pk = self.context.get('root_pk') or obj.pk
return obj.serializable_pictures_mobile(root_pk)
def get_children(self, obj):
children = obj.children.all().annotate(start_point=Transform(StartPoint('geom'), settings.API_SRID),
end_point=Transform(EndPoint('geom'), settings.API_SRID))
serializer_children = TrekListSerializer(children, many=True, context={'root_pk': obj.pk})
return serializer_children.data
def get_points_reference(self, obj):
if not obj.points_reference:
return None
return obj.points_reference.transform(settings.API_SRID, clone=True).coords
def get_parking_location(self, obj):
if not obj.parking_location:
return None
return obj.parking_location.transform(settings.API_SRID, clone=True).coords
def get_arrival_city(self, obj):
qs = City.objects.all()
if obj.end_point:
city = qs.filter(geom__covers=(obj.end_point, 0)).first()
if city:
return city.code
return None
def get_information_desks(self, obj):
return [
InformationDeskSerializer(information_desk, context={'root_pk': obj.pk}).data
for information_desk in obj.information_desks.all()
]
def get_profile(self, obj):
root_pk = self.context.get('root_pk') or obj.pk
return os.path.join("/", str(root_pk), settings.MEDIA_URL.lstrip('/'), obj.get_elevation_chart_url_png())
class Meta(TrekBaseSerializer.Meta):
auto_bbox = True
fields = (
'id', 'pk', 'name', 'slug', 'accessibilities', 'description_teaser', 'cities', 'profile',
'description', 'departure', 'arrival', 'duration', 'access', 'advised_parking', 'advice',
'difficulty', 'length', 'ascent', 'descent', 'route', 'labels', 'parking_location',
'min_elevation', 'max_elevation', 'themes', 'networks', 'practice', 'difficulty',
'geometry', 'pictures', 'information_desks', 'cities', 'departure_city', 'arrival_city',
'points_reference', 'districts', 'ambiance', 'children',
)
| python | 6,106 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Urwid BigText fonts
# Copyright (C) 2004-2006 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from urwid.escape import SAFE_ASCII_DEC_SPECIAL_RE
from urwid.util import apply_target_encoding, str_util
from urwid.canvas import TextCanvas
def separate_glyphs(gdata, height):
"""return (dictionary of glyphs, utf8 required)"""
gl = gdata.split("\n")
del gl[0]
del gl[-1]
for g in gl:
assert "\t" not in g
assert len(gl) == height+1, repr(gdata)
key_line = gl[0]
del gl[0]
c = None # current character
key_index = 0 # index into character key line
end_col = 0 # column position at end of glyph
start_col = 0 # column position at start of glyph
jl = [0]*height # indexes into lines of gdata (gl)
dout = {}
utf8_required = False
while True:
if c is None:
if key_index >= len(key_line):
break
c = key_line[key_index]
if key_index < len(key_line) and key_line[key_index] == c:
end_col += str_util.get_width(ord(c))
key_index += 1
continue
out = []
for k in range(height):
l = gl[k]
j = jl[k]
y = 0
fill = 0
while y < end_col - start_col:
if j >= len(l):
fill = end_col - start_col - y
break
y += str_util.get_width(ord(l[j]))
j += 1
assert y + fill == end_col - start_col, \
repr((y, fill, end_col))
segment = l[jl[k]:j]
if not SAFE_ASCII_DEC_SPECIAL_RE.match(segment):
utf8_required = True
out.append(segment + " " * fill)
jl[k] = j
start_col = end_col
dout[c] = (y + fill, out)
c = None
return dout, utf8_required
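# Minimal sketch of the expected gdata layout (glyphs invented for illustration):
# after the leading newline comes a key line whose repeated characters mark column
# spans, followed by `height` rows of artwork. For height == 1:
#
#   glyphs, needs_utf8 = separate_glyphs("\n112\nab-\n", 1)
#   # glyphs == {'1': (2, ['ab']), '2': (1, ['-'])}, needs_utf8 == False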
_all_fonts = []
def get_all_fonts():
"""
Return a list of (font name, font class) tuples.
"""
return _all_fonts[:]
def add_font(name, cls):
_all_fonts.append((name, cls))
class Font(object):
def __init__(self):
assert self.height
assert self.data
self.char = {}
self.canvas = {}
self.utf8_required = False
for gdata in self.data:
self.add_glyphs(gdata)
def add_glyphs(self, gdata):
d, utf8_required = separate_glyphs(gdata, self.height)
self.char.update(d)
self.utf8_required |= utf8_required
def characters(self):
l = self.char.keys()
l.sort()
return "".join(l)
def char_width(self, c):
if c in self.char:
return self.char[c][0]
return 0
def char_data(self, c):
return self.char[c][1]
def render(self, c):
if c in self.canvas:
return self.canvas[c]
width, l = self.char[c]
tl = []
csl = []
for d in l:
t, cs = apply_target_encoding(d)
tl.append(t)
csl.append(cs)
canv = TextCanvas(tl, None, csl, maxcol=width,
check_width=False)
self.canvas[c] = canv
return canv
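# Usage sketch (assumes the Thin3x3Font defined below; rendering box-drawing glyphs
# may additionally require urwid's encoding to be set, e.g. set_encoding("utf-8")):
#
#   font = Thin3x3Font()
#   canv = font.render('1')   # TextCanvas, font.char_width('1') columns x font.height rows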
#safe_palette = u"┘┐┌└┼─├┤┴┬│"
#more_palette = u"═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬○"
#block_palette = u"▄#█#▀#▌#▐#▖#▗#▘#▙#▚#▛#▜#▝#▞#▟"
class Thin3x3Font(Font):
height = 3
data = [u"""
000111222333444555666777888999 !
┌─┐ ┐ ┌─┐┌─┐ ┐┌─ ┌─ ┌─┐┌─┐┌─┐ │
│ │ │ ┌─┘ ─┤└─┼└─┐├─┐ ┼├─┤└─┤ │
└─┘ ┴ └─ └─┘ ┴ ─┘└─┘ ┴└─┘ ─┘ .
""", ur"""
"###$$$%%%'*++,--.///:;==???[[\\\]]^__`
" ┼┼┌┼┐O /' /.. _┌─┐┌ \ ┐^ `
┼┼└┼┐ / * ┼ ─ / ., _ ┌┘│ \ │
└┼┘/ O , ./ . └ \ ┘ ──
"""]
add_font("Thin 3x3",Thin3x3Font)
class Thin4x3Font(Font):
height = 3
data = Thin3x3Font.data + [u"""
0000111122223333444455556666777788889999 ####$$$$
┌──┐ ┐ ┌──┐┌──┐ ┐┌── ┌── ┌──┐┌──┐┌──┐ ┼─┼┌┼┼┐
│ │ │ ┌──┘ ─┤└──┼└──┐├──┐ ┼├──┤└──┤ ┼─┼└┼┼┐
└──┘ ┴ └── └──┘ ┴ ──┘└──┘ ┴└──┘ ──┘ └┼┼┘
"""]
add_font("Thin 4x3",Thin4x3Font)
class HalfBlock5x4Font(Font):
height = 4
data = [u"""
00000111112222233333444445555566666777778888899999 !!
▄▀▀▄ ▄█ ▄▀▀▄ ▄▀▀▄ ▄ █ █▀▀▀ ▄▀▀ ▀▀▀█ ▄▀▀▄ ▄▀▀▄ █
█ █ █ ▄▀ ▄▀ █▄▄█ █▄▄ █▄▄ ▐▌ ▀▄▄▀ ▀▄▄█ █
█ █ █ ▄▀ ▄ █ █ █ █ █ █ █ █ █ ▀
▀▀ ▀▀▀ ▀▀▀▀ ▀▀ ▀ ▀▀▀ ▀▀ ▀ ▀▀ ▀▀ ▀
""", u'''
"""######$$$$$$%%%%%&&&&&((()))******++++++,,,-----..////:::;;
█▐▌ █ █ ▄▀█▀▄ ▐▌▐▌ ▄▀▄ █ █ ▄ ▄ ▄ ▐▌
▀█▀█▀ ▀▄█▄ █ ▀▄▀ ▐▌ ▐▌ ▄▄█▄▄ ▄▄█▄▄ ▄▄▄▄ █ ▀ ▀
▀█▀█▀ ▄ █ █ ▐▌▄ █ ▀▄▌▐▌ ▐▌ ▄▀▄ █ ▐▌ ▀ ▄▀
▀ ▀ ▀▀▀ ▀ ▀ ▀▀ ▀ ▀ ▄▀ ▀ ▀
''', ur"""
<<<<<=====>>>>>?????@@@@@@[[[[\\\\]]]]^^^^____```{{{{||}}}}~~~~''´´´
▄▀ ▀▄ ▄▀▀▄ ▄▀▀▀▄ █▀▀ ▐▌ ▀▀█ ▄▀▄ ▀▄ ▄▀ █ ▀▄ ▄ █ ▄▀
▄▀ ▀▀▀▀ ▀▄ ▄▀ █ █▀█ █ █ █ ▄▀ █ ▀▄ ▐▐▌▌
▀▄ ▀▀▀▀ ▄▀ ▀ █ ▀▀▀ █ ▐▌ █ █ █ █ ▀
▀ ▀ ▀ ▀▀▀ ▀▀▀ ▀ ▀▀▀ ▀▀▀▀ ▀ ▀ ▀
""", u'''
AAAAABBBBBCCCCCDDDDDEEEEEFFFFFGGGGGHHHHHIIJJJJJKKKKK
▄▀▀▄ █▀▀▄ ▄▀▀▄ █▀▀▄ █▀▀▀ █▀▀▀ ▄▀▀▄ █ █ █ █ █ █
█▄▄█ █▄▄▀ █ █ █ █▄▄ █▄▄ █ █▄▄█ █ █ █▄▀
█ █ █ █ █ ▄ █ █ █ █ █ ▀█ █ █ █ ▄ █ █ ▀▄
▀ ▀ ▀▀▀ ▀▀ ▀▀▀ ▀▀▀▀ ▀ ▀▀ ▀ ▀ ▀ ▀▀ ▀ ▀
''', u'''
LLLLLMMMMMMNNNNNOOOOOPPPPPQQQQQRRRRRSSSSSTTTTT
█ █▄ ▄█ ██ █ ▄▀▀▄ █▀▀▄ ▄▀▀▄ █▀▀▄ ▄▀▀▄ ▀▀█▀▀
█ █ ▀ █ █▐▌█ █ █ █▄▄▀ █ █ █▄▄▀ ▀▄▄ █
█ █ █ █ ██ █ █ █ █ ▌█ █ █ ▄ █ █
▀▀▀▀ ▀ ▀ ▀ ▀ ▀▀ ▀ ▀▀▌ ▀ ▀ ▀▀ ▀
''', u'''
UUUUUVVVVVVWWWWWWXXXXXXYYYYYYZZZZZ
█ █ █ █ █ █ █ █ █ █ ▀▀▀█
█ █ ▐▌ ▐▌ █ ▄ █ ▀▄▀ ▀▄▀ ▄▀
█ █ █ █ ▐▌█▐▌ ▄▀ ▀▄ █ █
▀▀ ▀ ▀ ▀ ▀ ▀ ▀ ▀▀▀▀
''', u'''
aaaaabbbbbcccccdddddeeeeeffffggggghhhhhiijjjjkkkkk
█ █ ▄▀▀ █ ▄ ▄ █
▀▀▄ █▀▀▄ ▄▀▀▄ ▄▀▀█ ▄▀▀▄ ▀█▀ ▄▀▀▄ █▀▀▄ ▄ ▄ █ ▄▀
▄▀▀█ █ █ █ ▄ █ █ █▀▀ █ ▀▄▄█ █ █ █ █ █▀▄
▀▀▀ ▀▀▀ ▀▀ ▀▀▀ ▀▀ ▀ ▄▄▀ ▀ ▀ ▀ ▄▄▀ ▀ ▀
''', u'''
llmmmmmmnnnnnooooopppppqqqqqrrrrssssstttt
█ █
█ █▀▄▀▄ █▀▀▄ ▄▀▀▄ █▀▀▄ ▄▀▀█ █▀▀ ▄▀▀▀ ▀█▀
█ █ █ █ █ █ █ █ █ █ █ █ █ ▀▀▄ █
▀ ▀ ▀ ▀ ▀ ▀▀ █▀▀ ▀▀█ ▀ ▀▀▀ ▀
''', u'''
uuuuuvvvvvwwwwwwxxxxxxyyyyyzzzzz
█ █ █ █ █ ▄ █ ▀▄ ▄▀ █ █ ▀▀█▀
█ █ ▐▌▐▌ ▐▌█▐▌ ▄▀▄ ▀▄▄█ ▄▀
▀▀ ▀▀ ▀ ▀ ▀ ▀ ▄▄▀ ▀▀▀▀
''']
add_font("Half Block 5x4",HalfBlock5x4Font)
class HalfBlock6x5Font(Font):
height = 5
data = [u"""
000000111111222222333333444444555555666666777777888888999999 ..::////
▄▀▀▀▄ ▄█ ▄▀▀▀▄ ▄▀▀▀▄ ▄ █ █▀▀▀▀ ▄▀▀▀ ▀▀▀▀█ ▄▀▀▀▄ ▄▀▀▀▄ █
█ █ █ █ █ █ █ █ █ ▐▌ █ █ █ █ ▀ ▐▌
█ █ █ ▄▀ ▀▀▄ ▀▀▀█▀ ▀▀▀▀▄ █▀▀▀▄ █ ▄▀▀▀▄ ▀▀▀█ ▄ █
█ █ █ ▄▀ ▄ █ █ █ █ █ ▐▌ █ █ █ ▐▌
▀▀▀ ▀▀▀ ▀▀▀▀▀ ▀▀▀ ▀ ▀▀▀▀ ▀▀▀ ▀ ▀▀▀ ▀▀▀ ▀ ▀
"""]
add_font("Half Block 6x5",HalfBlock6x5Font)
class HalfBlockHeavy6x5Font(Font):
height = 5
data = [u"""
000000111111222222333333444444555555666666777777888888999999 ..::////
▄███▄ ▐█▌ ▄███▄ ▄███▄ █▌ █████ ▄███▄ █████ ▄███▄ ▄███▄ █▌
█▌ ▐█ ▀█▌ ▀ ▐█ ▀ ▐█ █▌ █▌ █▌ █▌ █▌ █▌ ▐█ █▌ ▐█ █▌ ▐█
█▌ ▐█ █▌ ▄█▀ ██▌ █████ ████▄ ████▄ ▐█ ▐███▌ ▀████ █▌
█▌ ▐█ █▌ ▄█▀ ▄ ▐█ █▌ ▐█ █▌ ▐█ █▌ █▌ ▐█ ▐█ █▌▐█
▀███▀ ███▌ █████ ▀███▀ █▌ ████▀ ▀███▀ ▐█ ▀███▀ ▀███▀ █▌ █▌
"""]
add_font("Half Block Heavy 6x5",HalfBlockHeavy6x5Font)
class Thin6x6Font(Font):
height = 6
data = [u"""
000000111111222222333333444444555555666666777777888888999999''
┌───┐ ┐ ┌───┐ ┌───┐ ┐ ┌─── ┌─── ┌───┐ ┌───┐ ┌───┐ │
│ │ │ │ │ ┌ │ │ │ │ │ │ │ │
│ / │ │ ┌───┘ ─┤ └──┼─ └───┐ ├───┐ ┼ ├───┤ └───┤
│ │ │ │ │ │ │ │ │ │ │ │ │
└───┘ ┴ └─── └───┘ ┴ ───┘ └───┘ ┴ └───┘ ───┘
""", ur'''
!! """######$$$$$$%%%%%%&&&&&&((()))******++++++
│ ││ ┌ ┌ ┌─┼─┐ ┌┐ / ┌─┐ / \
│ ─┼─┼─ │ │ └┘ / │ │ │ │ \ / │
│ │ │ └─┼─┐ / ┌─\┘ │ │ ──X── ──┼──
│ ─┼─┼─ │ │ / ┌┐ │ \, │ │ / \ │
. ┘ ┘ └─┼─┘ / └┘ └───\ \ /
''', ur"""
,,-----..//////::;;<<<<=====>>>>??????@@@@@@
/ ┌───┐ ┌───┐
/ . . / ──── \ │ │┌──┤
──── / / \ ┌─┘ ││ │
/ . , \ ──── / │ │└──┘
, . / \ / . └───┘
""", ur"""
[[\\\\\\]]^^^____``{{||}}~~~~~~
┌ \ ┐ /\ \ ┌ │ ┐
│ \ │ │ │ │ ┌─┐
│ \ │ ┤ │ ├ └─┘
│ \ │ │ │ │
└ \ ┘ ──── └ │ ┘
""", u"""
AAAAAABBBBBBCCCCCCDDDDDDEEEEEEFFFFFFGGGGGGHHHHHHIIJJJJJJ
┌───┐ ┬───┐ ┌───┐ ┬───┐ ┬───┐ ┬───┐ ┌───┐ ┬ ┬ ┬ ┬
│ │ │ │ │ │ │ │ │ │ │ │ │ │
├───┤ ├───┤ │ │ │ ├── ├── │ ──┬ ├───┤ │ │
│ │ │ │ │ │ │ │ │ │ │ │ │ │ ┬ │
┴ ┴ ┴───┘ └───┘ ┴───┘ ┴───┘ ┴ └───┘ ┴ ┴ ┴ └───┘
""", u"""
KKKKKKLLLLLLMMMMMMNNNNNNOOOOOOPPPPPPQQQQQQRRRRRRSSSSSS
┬ ┬ ┬ ┌─┬─┐ ┬─┐ ┬ ┌───┐ ┬───┐ ┌───┐ ┬───┐ ┌───┐
│ ┌─┘ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
├─┴┐ │ │ │ │ │ │ │ │ │ ├───┘ │ │ ├─┬─┘ └───┐
│ └┐ │ │ │ │ │ │ │ │ │ │ ┐│ │ └─┐ │
┴ ┴ ┴───┘ ┴ ┴ ┴ └─┴ └───┘ ┴ └──┼┘ ┴ ┴ └───┘
└
""", u"""
TTTTTTUUUUUUVVVVVVWWWWWWXXXXXXYYYYYYZZZZZZ
┌─┬─┐ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┌───┐
│ │ │ │ │ │ │ └┐ ┌┘ │ │ ┌─┘
│ │ │ │ │ │ │ │ ├─┤ └─┬─┘ ┌┘
│ │ │ └┐ ┌┘ │ │ │ ┌┘ └┐ │ ┌┘
┴ └───┘ └─┘ └─┴─┘ ┴ ┴ ┴ └───┘
""", u"""
aaaaaabbbbbbccccccddddddeeeeeefffgggggghhhhhhiijjj
┌─┐
│ │ │ │ . .
┌───┐ ├───┐ ┌───┐ ┌───┤ ┌───┐ ┼ ┌───┐ ├───┐ ┐ ┐
┌───┤ │ │ │ │ │ ├───┘ │ │ │ │ │ │ │
└───┴ └───┘ └───┘ └───┘ └───┘ ┴ └───┤ ┴ ┴ ┴ │
└───┘ ─┘
""", u"""
kkkkkkllmmmmmmnnnnnnooooooppppppqqqqqqrrrrrssssss
│ │
│ ┌─ │ ┬─┬─┐ ┬───┐ ┌───┐ ┌───┐ ┌───┐ ┬──┐ ┌───┐
├─┴┐ │ │ │ │ │ │ │ │ │ │ │ │ │ └───┐
┴ └─ └ ┴ ┴ ┴ ┴ └───┘ ├───┘ └───┤ ┴ └───┘
│ │
""", u"""
ttttuuuuuuvvvvvvwwwwwwxxxxxxyyyyyyzzzzzz
│
─┼─ ┬ ┬ ┬ ┬ ┬ ┬ ─┐ ┌─ ┬ ┬ ────┬
│ │ │ └┐ ┌┘ │ │ │ ├─┤ │ │ ┌───┘
└─ └───┴ └─┘ └─┴─┘ ─┘ └─ └───┤ ┴────
└───┘
"""]
add_font("Thin 6x6",Thin6x6Font)
class HalfBlock7x7Font(Font):
height = 7
data = [u"""
0000000111111122222223333333444444455555556666666777777788888889999999'''
▄███▄ ▐█▌ ▄███▄ ▄███▄ █▌ ▐█████▌ ▄███▄ ▐█████▌ ▄███▄ ▄███▄ ▐█
▐█ █▌ ▀█▌ ▐█ █▌▐█ █▌▐█ █▌ ▐█ ▐█ ▐█ ▐█ █▌▐█ █▌▐█
▐█ ▐ █▌ █▌ █▌ ▐██ ▐█████▌▐████▄ ▐████▄ █▌ █████ ▀████▌
▐█ ▌ █▌ █▌ ▄█▀ █▌ █▌ █▌▐█ █▌ ▐█ ▐█ █▌ █▌
▐█ █▌ █▌ ▄█▀ ▐█ █▌ █▌ █▌▐█ █▌ █▌ ▐█ █▌ █▌
▀███▀ ███▌ ▐█████▌ ▀███▀ █▌ ▐████▀ ▀███▀ ▐█ ▀███▀ ▀███▀
""", u'''
!!! """""#######$$$$$$$%%%%%%%&&&&&&&(((())))*******++++++
▐█ ▐█ █▌ ▐█ █▌ █ ▄ █▌ ▄█▄ █▌▐█ ▄▄ ▄▄
▐█ ▐█ █▌▐█████▌ ▄███▄ ▐█▌▐█ ▐█ █▌ ▐█ █▌ ▀█▄█▀ ▐█
▐█ ▐█ █▌ ▐█▄█▄▄ ▀ █▌ ███ █▌ ▐█ ▐█████▌ ████▌
▐█ ▐█████▌ ▀▀█▀█▌ ▐█ ▄ ███▌▄ █▌ ▐█ ▄█▀█▄ ▐█
▐█ █▌ ▀███▀ █▌▐█▌▐█ █▌ ▐█ █▌ ▀▀ ▀▀
▐█ █ ▐█ ▀ ▀██▀█▌ █▌▐█
''', u"""
,,,------.../////:::;;;<<<<<<<======>>>>>>>???????@@@@@@@
█▌ ▄█▌ ▐█▄ ▄███▄ ▄███▄
▐█ ▐█ ▐█ ▄█▀ ▐████▌ ▀█▄ ▐█ █▌▐█ ▄▄█▌
▐████▌ █▌ ▐██ ██▌ █▌ ▐█▐█▀█▌
▐█ ▐█ ▐█ ▀█▄ ▐████▌ ▄█▀ █▌ ▐█▐█▄█▌
█▌ ▀ ▀█▌ ▐█▀ ▐█ ▀▀▀
▐█ ▐█ ▐█ █▌ ▀███▀
▀
""", ur"""
[[[[\\\\\]]]]^^^^^^^_____```{{{{{|||}}}}}~~~~~~~´´´
▐██▌▐█ ▐██▌ ▐█▌ ▐█ █▌▐█ ▐█ █▌
▐█ █▌ █▌ ▐█ █▌ █▌ █▌ ▐█ ▐█ ▄▄ ▐█
▐█ ▐█ █▌▐█ █▌ ▄█▌ ▐█ ▐█▄ ▐▀▀█▄▄▌
▐█ █▌ █▌ ▀█▌ ▐█ ▐█▀ ▀▀
▐█ ▐█ █▌ █▌ ▐█ ▐█
▐██▌ █▌▐██▌ █████ █▌▐█ ▐█
""", u"""
AAAAAAABBBBBBBCCCCCCCDDDDDDDEEEEEEEFFFFFFFGGGGGGGHHHHHHHIIIIJJJJJJJ
▄███▄ ▐████▄ ▄███▄ ▐████▄ ▐█████▌▐█████▌ ▄███▄ ▐█ █▌ ██▌ █▌
▐█ █▌▐█ █▌▐█ ▐█ █▌▐█ ▐█ ▐█ ▐█ █▌ ▐█ █▌
▐█████▌▐█████ ▐█ ▐█ █▌▐████ ▐████ ▐█ ▐█████▌ ▐█ █▌
▐█ █▌▐█ █▌▐█ ▐█ █▌▐█ ▐█ ▐█ ██▌▐█ █▌ ▐█ █▌
▐█ █▌▐█ █▌▐█ ▐█ █▌▐█ ▐█ ▐█ █▌▐█ █▌ ▐█ ▐█ █▌
▐█ █▌▐████▀ ▀███▀ ▐████▀ ▐█████▌▐█ ▀███▀ ▐█ █▌ ██▌ ▀███▀
""", u"""
KKKKKKKLLLLLLLMMMMMMMMNNNNNNNOOOOOOOPPPPPPPQQQQQQQRRRRRRRSSSSSSS
▐█ █▌▐█ ▄█▌▐█▄ ▐██ █▌ ▄███▄ ▐████▄ ▄███▄ ▐████▄ ▄███▄
▐█ █▌ ▐█ ▐█ ▐▌ █▌▐██▌ █▌▐█ █▌▐█ █▌▐█ █▌▐█ █▌▐█
▐█▄█▌ ▐█ ▐█ ▐▌ █▌▐█▐█ █▌▐█ █▌▐████▀ ▐█ █▌▐█████ ▀███▄
▐█▀█▌ ▐█ ▐█ █▌▐█ █▌█▌▐█ █▌▐█ ▐█ █▌▐█ █▌ █▌
▐█ █▌ ▐█ ▐█ █▌▐█ ▐██▌▐█ █▌▐█ ▐█ █▌█▌▐█ █▌ █▌
▐█ █▌▐█████▌▐█ █▌▐█ ██▌ ▀███▀ ▐█ ▀███▀ ▐█ █▌ ▀███▀
▀▀
""", u"""
TTTTTTTUUUUUUUVVVVVVVWWWWWWWWXXXXXXXYYYYYYYZZZZZZZ
█████▌▐█ █▌▐█ █▌▐█ █▌▐█ █▌ █▌ █▌▐█████▌
█▌ ▐█ █▌ █▌ ▐█ ▐█ █▌ ▐█ █▌ ▐█ ▐█ █▌
█▌ ▐█ █▌ ▐█ █▌ ▐█ █▌ ▐█▌ ▐██ █▌
█▌ ▐█ █▌ ███ ▐█ ▐▌ █▌ ███ █▌ █▌
█▌ ▐█ █▌ ▐█▌ ▐█ ▐▌ █▌ █▌ ▐█ █▌ █▌
█▌ ▀███▀ █ ▀█▌▐█▀ ▐█ █▌ █▌ ▐█████▌
""", u"""
aaaaaaabbbbbbbcccccccdddddddeeeeeeefffffggggggghhhhhhhiiijjjj
▐█ █▌ ▄█▌ ▐█ █▌ █▌
▐█ █▌ ▐█ ▐█
▄███▄ ▐████▄ ▄███▄ ▄████▌ ▄███▄ ▐███ ▄███▄ ▐████▄ ▐█▌ ▐█▌
▄▄▄█▌▐█ █▌▐█ ▐█ █▌▐█▄▄▄█▌ ▐█ ▐█ █▌▐█ █▌ █▌ █▌
▐█▀▀▀█▌▐█ █▌▐█ ▐█ █▌▐█▀▀▀ ▐█ ▐█▄▄▄█▌▐█ █▌ █▌ █▌
▀████▌▐████▀ ▀███▀ ▀████▌ ▀███▀ ▐█ ▀▀▀█▌▐█ █▌ █▌ █▌
▀███▀ ▐██
""", u"""
kkkkkkkllllmmmmmmmmnnnnnnnooooooopppppppqqqqqqqrrrrrrsssssss
▐█ ██
▐█ ▐█
▐█ ▄█▌ ▐█ ▄█▌▐█▄ ▐████▄ ▄███▄ ▐████▄ ▄████▌ ▄███▌ ▄███▄
▐█▄█▀ ▐█ ▐█ ▐▌ █▌▐█ █▌▐█ █▌▐█ █▌▐█ █▌▐█ ▐█▄▄▄
▐█▀▀█▄ ▐█ ▐█ ▐▌ █▌▐█ █▌▐█ █▌▐█ █▌▐█ █▌▐█ ▀▀▀█▌
▐█ █▌ ▐█▌▐█ █▌▐█ █▌ ▀███▀ ▐████▀ ▀████▌▐█ ▀███▀
▐█ █▌
""", u"""
tttttuuuuuuuvvvvvvvwwwwwwwwxxxxxxxyyyyyyyzzzzzzz
█▌
█▌
███▌▐█ █▌▐█ █▌▐█ █▌▐█ █▌▐█ █▌▐█████▌
█▌ ▐█ █▌ █▌ ▐█ ▐█ █▌ ▀█▄█▀ ▐█ █▌ ▄█▀
█▌ ▐█ █▌ ███ ▐█ ▐▌ █▌ ▄█▀█▄ ▐█▄▄▄█▌ ▄█▀
█▌ ▀███▀ ▐█▌ ▀█▌▐█▀ ▐█ █▌ ▀▀▀█▌▐█████▌
▀███▀
"""]
add_font("Half Block 7x7",HalfBlock7x7Font)
if __name__ == "__main__":
l = get_all_fonts()
all_ascii = "".join([chr(x) for x in range(32, 127)])
print "Available Fonts: (U) = UTF-8 required"
print "----------------"
for n,cls in l:
f = cls()
u = ""
if f.utf8_required:
u = "(U)"
print ("%-20s %3s " % (n,u)),
c = f.characters()
if c == all_ascii:
print "Full ASCII"
elif c.startswith(all_ascii):
print "Full ASCII + " + c[len(all_ascii):]
else:
print "Characters: " + c
| python | 16,068 |
__author__ = "avh5nm"
if __name__ == "__main__":
print("so tired.") | python | 72 |
from random import random, seed, sample
import numpy as np
import datetime
import time
# import Code.preprocessing as pp
from Code.dynamic_library import method_info
def remove_subject(rsub):
pn_list = list()
for target in rsub:
        # drop the '.csv' suffix and split the remaining name on '_'
        pn, cn = target[:-len('.csv')].split('_')
pn_list.append((pn, cn))
return pn_list
def method_base(param, comb, datasets):
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = baseDP(param.method, param.model_name, dataset=datasets, rsub=None)
    if param.datatype == "disease":
        # nb_class lives on the instance (set in __init__), so increment it there
        divide_process.nb_class += 1
sampling_data = divide_process.sampling()
sample_train = sampling_data["train"]
sample_test = sampling_data["test"]
for repeat in range(20):
train = sample_train[repeat]
test = sample_test[repeat]
for nb in range(3):
train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
sample_train[repeat] = train
sample_test[repeat] = test
nb_tag = divide_process.nb_class
nb_people = divide_process.nb_people
return sample_train, sample_test, nb_tag, nb_people
def method_sn(param, comb, datasets):
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = snDP(param.method, param.model_name, dataset=datasets, rsub=None)
    if param.datatype == "disease":
        # nb_class lives on the instance (set in __init__), so increment it there
        divide_process.nb_class += 1
sampling_data = divide_process.sampling()
sample_train = sampling_data["train"]
sample_test = sampling_data["test"]
for repeat in range(20):
train = sample_train[repeat]
test = sample_test[repeat]
for nb in range(3):
train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
sample_train[repeat] = train
sample_test[repeat] = test
nb_tag = divide_process.nb_class
nb_people = divide_process.nb_people
return sample_train, sample_test, nb_tag, nb_people
def method_leaveone(param, comb, datasets):
BaseDivideProcess(param.method, param.model_name, dataset=datasets)
if param.method == "cropping" or param.method == "convert":
divide_process = LeaveOneDP_ns(param.method, param.model_name, dataset=datasets, rsub=None)
tot_repeat = divide_process.nb_people
if param.datatype == "disease":
divide_process.nb_class += 1
elif param.method == "sleaveone":
divide_process = LeaveOneDP_select(param.method, param.model_name, dataset=datasets, rsub=None)
tot_repeat = 20
else:
divide_process = LeaveOneDP(param.method, param.model_name, dataset=datasets, rsub=None)
tot_repeat = divide_process.nb_people
if param.datatype == "disease":
divide_process.nb_class += 1
sampling_data = divide_process.sampling()
sample_train = sampling_data["train"]
sample_test = sampling_data["test"]
for repeat in range(tot_repeat):
train = sample_train[repeat]
test = sample_test[repeat]
for nb in range(3):
train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
sample_train[repeat] = train
sample_test[repeat] = test
nb_tag = divide_process.nb_class
nb_people = divide_process.nb_people
return sample_train, sample_test, nb_tag, nb_people
def method_fa_leaveone(param, comb, datasets):
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = LeaveOneDP(param.method, param.model_name, dataset=datasets,
                                rsub=None)
    if param.datatype == "disease":
        # nb_class lives on the instance (set in __init__), so increment it there
        divide_process.nb_class += 1
sampling_data = divide_process.sampling()
sample_train = sampling_data["train"]
sample_test = sampling_data["test"]
for repeat in range(divide_process.nb_people):
train = sample_train[repeat]
test = sample_test[repeat]
for nb in range(3):
train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
sample_train[repeat] = train
sample_test[repeat] = test
nb_tag = divide_process.nb_class
nb_people = divide_process.nb_people
return sample_train, sample_test, nb_tag, nb_people
def method_mdpi(param, comb, datasets):
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = mdpiDP(param.method, param.model_name, dataset=datasets,
                            rsub=None)
    if param.datatype == "disease":
        # nb_class lives on the instance (set in __init__), so increment it there
        divide_process.nb_class += 1
sampling_data = divide_process.sampling()
sample_train = sampling_data["train"]
sample_test = sampling_data["test"]
for repeat in range(20):
train = sample_train[repeat]
test = sample_test[repeat]
for nb in range(3):
train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
sample_train[repeat] = train
sample_test[repeat] = test
nb_tag = divide_process.nb_class
nb_people = divide_process.nb_people
return sample_train, sample_test, nb_tag, nb_people
def method_smdpi(param, comb, datasets):
BaseDivideProcess(param.method, param.model_name, dataset=datasets)
divide_process = mdpiDP(param.method, param.model_name, dataset=datasets
, rsub=None)
sampling_data = divide_process.sampling(s1=param.collect["select"][0], s2=param.collect["select"][1])
sample_train = sampling_data["train"]
sample_test = sampling_data["test"]
for repeat in range(20):
train = sample_train[repeat]
test = sample_test[repeat]
for nb in range(3):
train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
sample_train[repeat] = train
sample_test[repeat] = test
nb_tag = divide_process.nb_class
nb_people = divide_process.nb_people
return sample_train, sample_test, nb_tag, nb_people
def method_dhalf(param, comb, datasets):
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = mdpi_dhalfDP(param.method, param.model_name, dataset=datasets,
                                  rsub=None)
    if param.datatype == "disease":
        # nb_class lives on the instance (set in __init__), so increment it there
        divide_process.nb_class += 1
sampling_data = divide_process.sampling()
sample_train = sampling_data["train"]
sample_test = sampling_data["test"]
for repeat in range(20):
train = sample_train[repeat]
test = sample_test[repeat]
for nb in range(3):
train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
sample_train[repeat] = train
sample_test[repeat] = test
nb_tag = divide_process.nb_class
nb_people = divide_process.nb_people
return sample_train, sample_test, nb_tag, nb_people
def method_half(param, comb, datasets):
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = mdpi_halfDP(param.method, param.model_name, dataset=datasets,
                                 rsub=None)
    if param.datatype == "disease":
        # nb_class lives on the instance (set in __init__), so increment it there
        divide_process.nb_class += 1
sampling_data = divide_process.sampling()
sample_train = sampling_data["train"]
sample_test = sampling_data["test"]
for repeat in range(20):
train = sample_train[repeat]
test = sample_test[repeat]
for nb in range(3):
train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
sample_train[repeat] = train
sample_test[repeat] = test
nb_tag = divide_process.nb_class
nb_people = divide_process.nb_people
return sample_train, sample_test, nb_tag, nb_people
def method_MCCV(param, comb, datasets):
    BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    divide_process = mdpi_MCCVDP(param.method, param.model_name, dataset=datasets,
                                 rsub=None)
    if param.datatype == "disease":
        # nb_class lives on the instance (set in __init__), so increment it there
        divide_process.nb_class += 1
sampling_data = divide_process.sampling()
sample_train = sampling_data["train"]
sample_test = sampling_data["test"]
for repeat in range(20):
train = sample_train[repeat]
test = sample_test[repeat]
for nb in range(3):
train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
sample_train[repeat] = train
sample_test[repeat] = test
nb_tag = divide_process.nb_class
nb_people = divide_process.nb_people
return sample_train, sample_test, nb_tag, nb_people
def method_CV(param, comb, datasets):
BaseDivideProcess(param.method, param.model_name, dataset=datasets)
    if param.collect["CrossValidation"] == 7:
        divide_process = seven_CVDP(param.method, param.model_name, dataset=datasets,
                                    rsub=None)
    else:
        param.cv_ratio = param.collect["CrossValidation"]
        divide_process = select_CVDP(param.method, param.model_name, dataset=datasets,
                                     rsub=None)
    if param.datatype == "disease":
        # nb_class lives on the instance (set in __init__), so increment it there
        divide_process.nb_class += 1
sampling_data = divide_process.sampling()
sample_train = sampling_data["train"]
sample_test = sampling_data["test"]
for repeat in range(len(sample_train)):
train = sample_train[repeat]
test = sample_test[repeat]
for nb in range(3):
train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
sample_train[repeat] = train
sample_test[repeat] = test
nb_tag = divide_process.nb_class
nb_people = divide_process.nb_people
return sample_train, sample_test, nb_tag, nb_people
def method_vec(param, comb, datasets):
BaseDivideProcess(param.method, param.model_name, dataset=datasets)
if param.datatype == "disease":
BaseDivideProcess.nb_class += 1
divide_process = NotImplemented
sampling_data = divide_process.sampling()
sample_train = sampling_data["train"]
sample_test = sampling_data["test"]
for repeat in range(len(sample_train)):
train = sample_train[repeat]
test = sample_test[repeat]
for nb in range(3):
train[f"data_{nb}"] = divide_process.convert(data=train[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
test[f"data_{nb}"] = divide_process.convert(data=test[f"data_{nb}"],
mt=param.collect["minimum_threshold"], comb=comb)
sample_train[repeat] = train
sample_test[repeat] = test
nb_tag = divide_process.nb_class
nb_people = divide_process.nb_people
return sample_train, sample_test, nb_tag, nb_people
# Base Divide Process Class
class BaseDivideProcess:
def __init__(self, mode, model_name, dataset):
assert len(dataset) == 3, "dataset must be 3 arguments"
data1, data2, data3 = dataset
# [data1, data2, data3] = pp.sort_by_people(dataset)
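        # the last two columns of every array are the person label (-2) and the class
        # tag (-1); sorting by person keeps each subject's rows in one contiguous block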
data1 = data1[data1[:, -2].argsort()]
data2 = data2[data2[:, -2].argsort()]
data3 = data3[data3[:, -2].argsort()]
# sampling func name
self.mode = mode
# used model name
self.model_name = model_name
self.dataset = dataset
self.plabel = data1[:, -2]
self.tlabel = data1[:, -1]
# dataset index
self.data1 = data1[:, :-2]
self.data2 = data2[:, :-2]
self.data3 = data3[:, :-2]
self.nb_class = int(max(self.tlabel))
self.nb_people = int(max(self.plabel)) + 1
def sampling(self):
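        # implemented by the concrete divide-process subclasses below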
pass
def convert(self, data, mt, comb):
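        # reshape a flat (n_samples, n_features) block into the input layout the chosen
        # model family expects: 4-D for the '4columns' models, a 3-channel stack for
        # pVGG, 3-D for the '3columns' models, and unchanged for plain vector models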
drow, dcol = data.shape
input_shape = (int(mt * comb), int((dcol) / (mt * comb)))
if self.model_name in method_info['4columns']:
converted = data.reshape(-1, input_shape[0], input_shape[1], 1)
elif self.model_name == "pVGG":
data = data.reshape(-1, input_shape[0], input_shape[1])
converted = np.zeros((data.shape[0], data.shape[1], data.shape[2], 3))
for idx in range(3):
converted[:, :, :, idx] = data
elif self.model_name in method_info['3columns']:
converted = data.reshape(-1, input_shape[0], input_shape[1])
elif self.model_name in method_info['2columns']:
converted = data
elif self.model_name in method_info['specific']:
converted = data
elif self.model_name in method_info['vector']:
converted = data
elif self.model_name in method_info['5columns']:
if input_shape[1] == 6:
converted = data.reshape(-1, input_shape[0], input_shape[1])
left_data = converted[:, :, :3]
right_data = converted[:, :, 3:]
converted = [left_data, right_data]
else:
converted = data.reshape(-1, input_shape[0], input_shape[1])
return converted
# 1000, 1000 sampling Class
class baseDP(BaseDivideProcess):
    """
    1000-sample train / 1000-sample test random split (20 seeded repeats)
    """
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self):
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
for repeat in range(20):
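            # one seeded shuffle per repeat: the first 1000 shuffled rows become the
            # training set and the next 1000 the test set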
seed(repeat)
drow, _ = self.data1.shape
train_dict = dict()
test_dict = dict()
dataset_list = list()
random_list = sample(range(drow), drow)
for dataset in [self.data1, self.data2, self.data3]:
dataset_list.append(dataset[random_list])
targetp = self.plabel[random_list]
targetc = self.tlabel[random_list] - 1
for i, dataset in enumerate(dataset_list):
train_dict[f"data_{i}"] = dataset[:1000, :]
test_dict[f"data_{i}"] = dataset[1000:2000, :]
train_dict["people"] = targetp[:1000]
train_dict["tag"] = targetc[:1000]
test_dict["people"] = targetp[1000:2000]
test_dict["tag"] = targetc[1000:2000]
total_dataset["train"].append(train_dict)
total_dataset["test"].append(test_dict)
return total_dataset
# 600-900 sampling Class
class snDP(BaseDivideProcess):
"""
Sn 600-900 sampling
"""
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self):
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
for repeat in range(20):
seed(repeat)
drow, _ = self.data1.shape
train_dict = dict()
test_dict = dict()
for class_target in range(self.nb_class):
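                # slice from the first to the last row of this class, shuffle it, then
                # keep the first 200 rows for train and the rest for test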
find_idx = []
count_idx = 0
for idx in range(drow):
if self.tlabel[idx] == class_target:
find_idx.append(idx)
count_idx += 1
dataset_list = list()
for dataset in [self.data1, self.data2, self.data3]:
target = dataset[find_idx[0]:find_idx[-1] + 1, :]
dataset_list.append(target)
targetp = self.plabel[find_idx[0]:find_idx[-1] + 1]
targetc = self.tlabel[find_idx[0]:find_idx[-1] + 1]
random_list = sample(range(count_idx), count_idx)
for i, target in enumerate(dataset_list):
dataset_list[i] = target[random_list]
targetp = targetp[random_list]
targetc = targetc[random_list]
if class_target == 0:
for i, dataset in enumerate(dataset_list):
train_dict[f"data_{i}"] = dataset[:200, :]
test_dict[f"data_{i}"] = dataset[200:, :]
train_dict["people"] = targetp[:200]
train_dict["tag"] = targetc[:200]
test_dict["people"] = targetp[200:]
test_dict["tag"] = targetc[200:]
else:
for i, dataset in enumerate(dataset_list):
train_dict[f"data_{i}"] = np.vstack([train_dict[f"data_{i}"], dataset[:200, :]])
test_dict[f"data_{i}"] = np.vstack([test_dict[f"data_{i}"], dataset[200:, :]])
train_dict["people"] = np.concatenate([train_dict["people"], targetp[:200]])
train_dict["tag"] = np.concatenate([train_dict["tag"], targetc[:200]])
test_dict["people"] = np.concatenate([test_dict["people"], targetp[200:]])
test_dict["tag"] = np.concatenate([test_dict["tag"], targetc[200:]])
other_samples, _ = train_dict["data_0"].shape
random_list = sample(range(other_samples), 600)
train_dict["people"] = train_dict["people"][random_list]
train_dict["tag"] = train_dict["tag"][random_list]
for i in range(3):
train_dict[f"data_{i}"] = train_dict[f"data_{i}"][random_list]
other_samples, _ = test_dict["data_0"].shape
random_list = sample(range(other_samples), 900)
test_dict["people"] = test_dict["people"][random_list]
test_dict["tag"] = test_dict["tag"][random_list]
for i in range(3):
test_dict[f"data_{i}"] = test_dict[f"data_{i}"][random_list]
total_dataset["train"].append(train_dict)
total_dataset["test"].append(test_dict)
return total_dataset
# LeaveOne sampling Class
class LeaveOneDP(BaseDivideProcess):
"""
LeaveOne sampling
"""
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self):
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
for peo_target in range(self.nb_people):
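            # hold out one person per fold: their rows form the (shuffled) test set and
            # every other row the (shuffled) training set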
train_dict = dict()
test_dict = dict()
dataset_list = list()
train_list = list()
find_idx = []
count_idx = 0
drow, _ = self.data1.shape
for idx in range(drow):
if self.plabel[idx] == peo_target:
find_idx.append(idx)
count_idx += 1
for dataset in [self.data1, self.data2, self.data3]:
target = dataset[find_idx[0]:find_idx[-1] + 1, :]
if find_idx[0] == 0:
train = dataset[find_idx[-1] + 1:, :]
elif find_idx[0] != 0 and find_idx[-1] + 1 != drow:
temp1 = dataset[:find_idx[0], :]
temp2 = dataset[find_idx[-1] + 1:, :]
train = np.vstack([temp1, temp2])
                elif find_idx[-1] + 1 == drow:
                    # the held-out person sits at the end: train on everything before it
                    train = dataset[:find_idx[0], :]
dataset_list.append(target)
train_list.append(train)
targetp = self.plabel[find_idx[0]:find_idx[-1] + 1]
targetc = self.tlabel[find_idx[0]:find_idx[-1] + 1]
if find_idx[0] == 0:
trainp = self.plabel[find_idx[-1] + 1:]
trainc = self.tlabel[find_idx[-1] + 1:]
elif find_idx[0] != 0 and find_idx[-1] + 1 != drow:
temp1 = self.plabel[:find_idx[0]]
temp2 = self.plabel[find_idx[-1] + 1:]
trainp = np.concatenate([temp1, temp2])
temp1 = self.tlabel[:find_idx[0]]
temp2 = self.tlabel[find_idx[-1] + 1:]
trainc = np.concatenate([temp1, temp2])
            elif find_idx[-1] + 1 == drow:
                # exclude the held-out person's labels from the training labels
                trainp = self.plabel[:find_idx[0]]
                trainc = self.tlabel[:find_idx[0]]
target_indexes, _ = dataset_list[0].shape
train_indexes, _ = train_list[0].shape
random_list1 = sample(range(target_indexes), target_indexes)
random_list2 = sample(range(train_indexes), train_indexes)
for i, dataset in enumerate(dataset_list):
test_dict[f"data_{i}"] = dataset[random_list1]
test_dict["people"] = targetp[random_list1]
test_dict["tag"] = targetc[random_list1]
for i, dataset in enumerate(train_list):
train_dict[f"data_{i}"] = dataset[random_list2]
train_dict["people"] = trainp[random_list2]
train_dict["tag"] = trainc[random_list2]
total_dataset["train"].append(train_dict)
total_dataset["test"].append(test_dict)
return total_dataset
# LeaveOne sampling Class no shuffle
class LeaveOneDP_ns(BaseDivideProcess):
"""
LeaveOne sampling
"""
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self):
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
for peo_target in range(self.nb_people):
train_dict = dict()
test_dict = dict()
dataset_list = list()
train_list = list()
find_idx = []
count_idx = 0
drow, _ = self.data1.shape
for idx in range(drow):
if self.plabel[idx] == peo_target:
find_idx.append(idx)
count_idx += 1
for dataset in [self.data1, self.data2, self.data3]:
target = dataset[find_idx[0]:find_idx[-1] + 1, :]
if find_idx[0] == 0:
train = dataset[find_idx[-1] + 1:, :]
elif find_idx[0] != 0 and find_idx[-1] + 1 != drow:
temp1 = dataset[:find_idx[0], :]
temp2 = dataset[find_idx[-1] + 1:, :]
train = np.vstack([temp1, temp2])
                elif find_idx[-1] + 1 == drow:
                    # the held-out person sits at the end: train on everything before it
                    train = dataset[:find_idx[0], :]
dataset_list.append(target)
train_list.append(train)
targetp = self.plabel[find_idx[0]:find_idx[-1] + 1]
targetc = self.tlabel[find_idx[0]:find_idx[-1] + 1]
if find_idx[0] == 0:
trainp = self.plabel[find_idx[-1] + 1:]
trainc = self.tlabel[find_idx[-1] + 1:]
elif find_idx[0] != 0 and find_idx[-1] + 1 != drow:
temp1 = self.plabel[:find_idx[0]]
temp2 = self.plabel[find_idx[-1] + 1:]
trainp = np.concatenate([temp1, temp2])
temp1 = self.tlabel[:find_idx[0]]
temp2 = self.tlabel[find_idx[-1] + 1:]
trainc = np.concatenate([temp1, temp2])
            elif find_idx[-1] + 1 == drow:
                # exclude the held-out person's labels from the training labels
                trainp = self.plabel[:find_idx[0]]
                trainc = self.tlabel[:find_idx[0]]
for i, dataset in enumerate(dataset_list):
test_dict[f"data_{i}"] = dataset
test_dict["people"] = targetp
test_dict["tag"] = targetc
for i, dataset in enumerate(train_list):
train_dict[f"data_{i}"] = dataset
train_dict["people"] = trainp
train_dict["tag"] = trainc
total_dataset["train"].append(train_dict)
total_dataset["test"].append(test_dict)
return total_dataset
# LeaveOne sampling Class
class LeaveOneDP(BaseDivideProcess):
"""
LeaveOne sampling
"""
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self):
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
for peo_target in range(self.nb_people):
train_dict = dict()
test_dict = dict()
dataset_list = list()
train_list = list()
find_idx = []
count_idx = 0
drow, _ = self.data1.shape
for idx in range(drow):
if self.plabel[idx] == peo_target:
find_idx.append(idx)
count_idx += 1
for dataset in [self.data1, self.data2, self.data3]:
target = dataset[find_idx[0]:find_idx[-1] + 1, :]
if find_idx[0] == 0:
train = dataset[find_idx[-1] + 1:, :]
elif find_idx[0] != 0 and find_idx[-1] + 1 != drow:
temp1 = dataset[:find_idx[0], :]
temp2 = dataset[find_idx[-1] + 1:, :]
train = np.vstack([temp1, temp2])
                elif find_idx[-1] + 1 == drow:
                    # the held-out person sits at the end: train on everything before it
                    train = dataset[:find_idx[0], :]
dataset_list.append(target)
train_list.append(train)
targetp = self.plabel[find_idx[0]:find_idx[-1] + 1]
targetc = self.tlabel[find_idx[0]:find_idx[-1] + 1]
if find_idx[0] == 0:
trainp = self.plabel[find_idx[-1] + 1:]
trainc = self.tlabel[find_idx[-1] + 1:]
elif find_idx[0] != 0 and find_idx[-1] + 1 != drow:
temp1 = self.plabel[:find_idx[0]]
temp2 = self.plabel[find_idx[-1] + 1:]
trainp = np.concatenate([temp1, temp2])
temp1 = self.tlabel[:find_idx[0]]
temp2 = self.tlabel[find_idx[-1] + 1:]
trainc = np.concatenate([temp1, temp2])
            elif find_idx[-1] + 1 == drow:
                # exclude the held-out person's labels from the training labels
                trainp = self.plabel[:find_idx[0]]
                trainc = self.tlabel[:find_idx[0]]
target_indexes, _ = dataset_list[0].shape
train_indexes, _ = train_list[0].shape
random_list1 = sample(range(target_indexes), target_indexes)
random_list2 = sample(range(train_indexes), train_indexes)
for i, dataset in enumerate(dataset_list):
test_dict[f"data_{i}"] = dataset[random_list1]
test_dict["people"] = targetp[random_list1]
test_dict["tag"] = targetc[random_list1]
for i, dataset in enumerate(train_list):
train_dict[f"data_{i}"] = dataset[random_list2]
train_dict["people"] = trainp[random_list2]
train_dict["tag"] = trainc[random_list2]
total_dataset["train"].append(train_dict)
total_dataset["test"].append(test_dict)
return total_dataset
# LeaveOne sampling Class
class LeaveOneDP_select(BaseDivideProcess):
"""
LeaveOne sampling
"""
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self):
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
seed_num = 0
for repeat in range(20):
train_dict = dict()
test_dict = dict()
class_collect = dict()
for target_class in range(1, self.nb_class+1):
# per label collect
data1 = self.dataset[0][target_class == self.dataset[0][:, -1]]
data2 = self.dataset[1][target_class == self.dataset[0][:, -1]]
data3 = self.dataset[2][target_class == self.dataset[0][:, -1]]
per_people = list()
for peo_target in range(self.nb_people+1):
find_idx = []
count_idx = 0
drow, _ = data1.shape
for idx in range(drow):
if data1[idx, -2] == peo_target:
find_idx.append(idx)
count_idx += 1
if len(find_idx) == 0:
continue
dataset_list = list()
for dataset in [data1, data2, data3]:
target = dataset[find_idx[0]:find_idx[-1] + 1, :]
dataset_list.append(target)
per_people.append(dataset_list)
class_collect[target_class] = per_people
test_list = list()
train_list = list()
for key, datalist in class_collect.items():
class_len = len(datalist)
seed(seed_num)
seed_num += 1
ridx = sample(range(class_len), class_len)
temp_test = datalist.pop(ridx[0])
temp_train = datalist
test_list.append(temp_test)
train_list.extend(temp_train)
for sens in range(3):
for i, data in enumerate(test_list):
if i == 0:
test_dict[f"data_{sens}"] = data[sens][:, :-2]
if sens == 0:
test_dict["people"] = data[sens][:, -2]
test_dict["tag"] = data[sens][:, -1]
else:
test_dict[f"data_{sens}"] = np.vstack([test_dict[f"data_{sens}"], data[sens][:, :-2]])
if sens == 0:
test_dict["people"] = np.concatenate([test_dict["people"], data[sens][:, -2]])
test_dict["tag"] = np.concatenate([test_dict["tag"], data[sens][:, -1]])
for i, data in enumerate(train_list):
if i == 0:
train_dict[f"data_{sens}"] = data[sens][:, :-2]
if sens == 0:
train_dict["people"] = data[sens][:, -2]
train_dict["tag"] = data[sens][:, -1]
else:
train_dict[f"data_{sens}"] = np.vstack([train_dict[f"data_{sens}"], data[sens][:, :-2]])
if sens == 0:
train_dict["people"] = np.concatenate([train_dict["people"], data[sens][:, -2]])
train_dict["tag"] = np.concatenate([train_dict["tag"], data[sens][:, -1]])
total_dataset["train"].append(train_dict)
total_dataset["test"].append(test_dict)
return total_dataset
# mdpi per-person, per-class sampling Class
class mdpiDP(BaseDivideProcess):
"""
mdpi sampling
"""
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self, s1=3, s2=50):
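        # per person and per class: the first s1 shuffled rows go to train and rows
        # s1..s2 to test (s2 == -1 keeps every remaining row for test)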
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
for repeat in range(20):
seed(repeat)
drow, _ = self.data1.shape
train_dict = dict()
test_dict = dict()
for people_target in range(self.nb_people):
find_idx = []
for idx in range(drow):
if self.plabel[idx] == people_target:
find_idx.append(idx)
dataset_list = list()
for dataset in [self.data1, self.data2, self.data3]:
target = dataset[find_idx[0]:find_idx[-1] + 1, :]
dataset_list.append(target)
targetp = self.plabel[find_idx[0]:find_idx[-1] + 1]
targetc = self.tlabel[find_idx[0]:find_idx[-1] + 1]
for class_target in range(self.nb_class):
find_idx = []
count_idx = 0
for idx in range(dataset_list[0].shape[0]):
if targetc[idx] == class_target + 1:
find_idx.append(idx)
count_idx += 1
class_list = list()
try:
for dataset in dataset_list:
target = dataset[find_idx[0]:find_idx[-1] + 1, :]
class_list.append(target)
sec_targetp = targetp[find_idx[0]:find_idx[-1] + 1]
sec_targetc = targetc[find_idx[0]:find_idx[-1] + 1]
except:
class_list = list()
continue
random_list = sample(range(count_idx), count_idx)
for i, target in enumerate(class_list):
class_list[i] = target[random_list]
sec_targetp = sec_targetp[random_list]
sec_targetc = sec_targetc[random_list]
if s2 != -1:
if people_target == 0:
for i, dataset in enumerate(class_list):
train_dict[f"data_{i}"] = dataset[:s1, :]
test_dict[f"data_{i}"] = dataset[s1:s2, :]
train_dict["people"] = sec_targetp[:s1]
train_dict["tag"] = sec_targetc[:s1]
test_dict["people"] = sec_targetp[s1:s2]
test_dict["tag"] = sec_targetc[s1:s2]
else:
for i, dataset in enumerate(class_list):
train_dict[f"data_{i}"] = np.vstack([train_dict[f"data_{i}"], dataset[:s1, :]])
test_dict[f"data_{i}"] = np.vstack([test_dict[f"data_{i}"], dataset[s1:s2, :]])
train_dict["people"] = np.concatenate([train_dict["people"], sec_targetp[:s1]])
train_dict["tag"] = np.concatenate([train_dict["tag"], sec_targetc[:s1]])
test_dict["people"] = np.concatenate([test_dict["people"], sec_targetp[s1:s2]])
test_dict["tag"] = np.concatenate([test_dict["tag"], sec_targetc[s1:s2]])
else:
if people_target == 0:
for i, dataset in enumerate(class_list):
train_dict[f"data_{i}"] = dataset[:s1, :]
test_dict[f"data_{i}"] = dataset[s1:, :]
train_dict["people"] = sec_targetp[:s1]
train_dict["tag"] = sec_targetc[:s1]
test_dict["people"] = sec_targetp[s1:]
test_dict["tag"] = sec_targetc[s1:]
else:
for i, dataset in enumerate(class_list):
train_dict[f"data_{i}"] = np.vstack([train_dict[f"data_{i}"], dataset[:s1, :]])
test_dict[f"data_{i}"] = np.vstack([test_dict[f"data_{i}"], dataset[s1:, :]])
train_dict["people"] = np.concatenate([train_dict["people"], sec_targetp[:s1]])
train_dict["tag"] = np.concatenate([train_dict["tag"], sec_targetc[:s1]])
test_dict["people"] = np.concatenate([test_dict["people"], sec_targetp[s1:]])
test_dict["tag"] = np.concatenate([test_dict["tag"], sec_targetc[s1:]])
other_samples, _ = train_dict["data_0"].shape
random_list = sample(range(other_samples), other_samples)
train_dict["people"] = train_dict["people"][random_list]
train_dict["tag"] = train_dict["tag"][random_list]
for i in range(3):
train_dict[f"data_{i}"] = train_dict[f"data_{i}"][random_list]
other_samples, _ = test_dict["data_0"].shape
random_list = sample(range(other_samples), other_samples)
test_dict["people"] = test_dict["people"][random_list]
test_dict["tag"] = test_dict["tag"][random_list]
for i in range(3):
test_dict[f"data_{i}"] = test_dict[f"data_{i}"][random_list]
total_dataset["train"].append(train_dict)
total_dataset["test"].append(test_dict)
return total_dataset
# mdpi decimal-half sampling Class
class mdpi_dhalfDP(BaseDivideProcess):
"""
mdpi sampling
"""
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self):
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
for repeat in range(20):
seed(repeat)
drow, _ = self.data1.shape
train_dict = dict()
test_dict = dict()
rindx_list = sample(range(drow), drow)
dataset_list = list()
for dataset in [self.data1, self.data2, self.data3]:
randomized = dataset[rindx_list]
dataset_list.append(randomized)
targetc = self.tlabel[rindx_list]
targetp = self.plabel[rindx_list]
half_idx = int(drow / 2)
            # truncate half_idx to the magnitude of its leading digit
            # (e.g. 1234 -> 1000) so the train/test sizes are round numbers
result = 0
previous = 0
n = 10
while result == 0:
output = round(half_idx // n)
if output == 0:
n = n / 10
result = previous * n
else:
previous = output
n = n * 10
drop_idx = int(result)
# drop_idx = 10**(len(half_idx) - 1)
for i, dataset in enumerate(dataset_list):
train_dict[f"data_{i}"] = dataset[:drop_idx, :]
test_dict[f"data_{i}"] = dataset[drop_idx:2*drop_idx, :]
train_dict["people"] = targetp[:drop_idx]
train_dict["tag"] = targetc[:drop_idx]
test_dict["people"] = targetp[drop_idx:2*drop_idx]
test_dict["tag"] = targetc[drop_idx:2*drop_idx]
total_dataset["train"].append(train_dict)
total_dataset["test"].append(test_dict)
return total_dataset
# mdpi half-split sampling Class
class mdpi_halfDP(BaseDivideProcess):
"""
mdpi sampling
"""
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self):
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
for repeat in range(20):
seed(repeat)
drow, _ = self.data1.shape
train_dict = dict()
test_dict = dict()
rindx_list = sample(range(drow), drow)
dataset_list = list()
for dataset in [self.data1, self.data2, self.data3]:
randomized = dataset[rindx_list]
dataset_list.append(randomized)
targetc = self.tlabel[rindx_list]
targetp = self.plabel[rindx_list]
half_idx = int(drow/2)
for i, dataset in enumerate(dataset_list):
train_dict[f"data_{i}"] = dataset[:half_idx, :]
test_dict[f"data_{i}"] = dataset[half_idx:, :]
train_dict["people"] = targetp[:half_idx]
train_dict["tag"] = targetc[:half_idx]
test_dict["people"] = targetp[half_idx:]
test_dict["tag"] = targetc[half_idx:]
total_dataset["train"].append(train_dict)
total_dataset["test"].append(test_dict)
return total_dataset
# mdpi Monte Carlo cross-validation (70/30 split) sampling Class
class mdpi_MCCVDP(BaseDivideProcess):
"""
mdpi sampling
"""
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self):
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
for repeat in range(20):
seed(repeat)
drow, _ = self.data1.shape
train_dict = dict()
test_dict = dict()
rindx_list = sample(range(drow), drow)
dataset_list = list()
for dataset in [self.data1, self.data2, self.data3]:
randomized = dataset[rindx_list]
dataset_list.append(randomized)
targetc = self.tlabel[rindx_list]
targetp = self.plabel[rindx_list]
mcv_rate = int(drow * 0.7)
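            # 70 % of the shuffled rows go to train, the remaining 30 % to test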
for i, dataset in enumerate(dataset_list):
train_dict[f"data_{i}"] = dataset[:mcv_rate, :]
test_dict[f"data_{i}"] = dataset[mcv_rate:, :]
train_dict["people"] = targetp[:mcv_rate]
train_dict["tag"] = targetc[:mcv_rate]
test_dict["people"] = targetp[mcv_rate:]
test_dict["tag"] = targetc[mcv_rate:]
total_dataset["train"].append(train_dict)
total_dataset["test"].append(test_dict)
return total_dataset
# 7 - Cross Validation sampling Class
class seven_CVDP(BaseDivideProcess):
"""
    7-fold cross-validation sampling (5 seeded repeats)
"""
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self):
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
for repeat in range(5):
seed(repeat)
drow, _ = self.data1.shape
rindx_list = sample(range(drow), drow)
dataset_list = list()
for dataset in [self.data1, self.data2, self.data3]:
randomized = dataset[rindx_list]
dataset_list.append(randomized)
targetc = self.tlabel[rindx_list]
targetp = self.plabel[rindx_list]
cv_rate = int(drow / 7)
for cvi in range(7):
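                # fold cvi is the test block; every row outside it goes to the training set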
train_dict = dict()
test_dict = dict()
for i, dataset in enumerate(dataset_list):
test_dict[f"data_{i}"] = dataset[cv_rate*cvi: cv_rate*(cvi+1), :]
test_dict["people"] = targetp[cv_rate*cvi: cv_rate*(cvi+1)]
test_dict["tag"] = targetc[cv_rate*cvi: cv_rate*(cvi+1)]
indexing = np.arange(cv_rate*cvi, cv_rate*(cvi+1))
for i, dataset in enumerate(dataset_list):
train_dict[f"data_{i}"] = np.array([element for idx, element in enumerate(dataset) if idx not in indexing])
train_dict["people"] = np.array([element for idx, element in enumerate(targetp) if idx not in indexing])
train_dict["tag"] = np.array([element for idx, element in enumerate(targetc) if idx not in indexing])
# if cvi == 0:
# for i, dataset in enumerate(dataset_list):
# test_dict[f"data_{i}"] = dataset[cv_rate:, :]
# test_dict["people"] = targetp[cv_rate:]
# test_dict["tag"] = targetc[cv_rate:]
# elif cvi == 6:
# for i, dataset in enumerate(dataset_list):
# test_dict[f"data_{i}"] = dataset[:cv_rate*cvi, :]
# test_dict["people"] = targetp[:cv_rate*cvi]
# test_dict["tag"] = targetc[:cv_rate*cvi]
# else:
# for i, dataset in enumerate(dataset_list):
# temp1 = dataset[:cv_rate*cvi, :]
# temp2 = dataset[cv_rate*(cvi+1):, :]
# test_dict[f"data_{i}"] = np.vstack([temp1, temp2])
# test_dict["people"] = np.vstack([targetp[:cv_rate*cvi], targetp[cv_rate*(cvi+1):]])
# test_dict["tag"] = np.vstack([targetc[:cv_rate*cvi], targetc[cv_rate*(cvi+1):]])
total_dataset["train"].append(train_dict)
total_dataset["test"].append(test_dict)
return total_dataset
# Selected Cross Validation sampling Class
class select_CVDP(BaseDivideProcess):
"""
    selected-fold cross-validation sampling
"""
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self):
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
for repeat in range(10):
seed(repeat)
drow, _ = self.data1.shape
train_dict = dict()
test_dict = dict()
rindx_list = sample(range(drow), drow)
dataset_list = list()
for dataset in [self.data1, self.data2, self.data3]:
randomized = dataset[rindx_list]
dataset_list.append(randomized)
targetc = self.tlabel[rindx_list]
targetp = self.plabel[rindx_list]
cv_rate = int(drow / 7)
for cvi in range(7):
for i, dataset in enumerate(dataset_list):
                    train_dict[f"data_{i}"] = dataset[cv_rate*cvi: cv_rate*(cvi+1), :]
                train_dict["people"] = targetp[cv_rate*cvi: cv_rate*(cvi+1)]
                train_dict["tag"] = targetc[cv_rate*cvi: cv_rate*(cvi+1)]
if cvi == 0:
for i, dataset in enumerate(dataset_list):
test_dict[f"data_{i}"] = dataset[cv_rate:, :]
test_dict["people"] = targetp[cv_rate:]
test_dict["tag"] = targetc[cv_rate:]
elif cvi == 6:
for i, dataset in enumerate(dataset_list):
test_dict[f"data_{i}"] = dataset[:cv_rate*cvi, :]
test_dict["people"] = targetp[:cv_rate*cvi]
test_dict["tag"] = targetc[:cv_rate*cvi]
else:
for i, dataset in enumerate(dataset_list):
temp1 = dataset[:cv_rate*cvi, :]
                        temp2 = dataset[cv_rate*(cvi+1):, :]
                        test_dict[f"data_{i}"] = np.vstack([temp1, temp2])
                    test_dict["people"] = np.concatenate([targetp[:cv_rate*cvi], targetp[cv_rate*(cvi+1):]])
                    test_dict["tag"] = np.concatenate([targetc[:cv_rate*cvi], targetc[cv_rate*(cvi+1):]])
total_dataset["train"].append(train_dict)
total_dataset["test"].append(test_dict)
return total_dataset
# Base Divide Process Class
class BaseVectorDivideProcess:
def __init__(self, mode, model_name, dataset):
        assert len(dataset) == 6, "dataset must be 6 arguments"
        pressure, accl, accr, gyrl, gyrr, info = dataset
# [data1, data2, data3] = pp.sort_by_people(dataset)
# sampling func name
self.mode = mode
# used model name
self.model_name = model_name
self.dataset = dataset
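        # info columns: 0 = real person label, 1 = person label, 2 = class tag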
self.real_plabel = info[:, 0]
self.plabel = info[:, 1]
self.tlabel = info[:, 2]
# dataset index
self.pressure = pressure
self.acc = [accl, accr]
self.gyro = [gyrl, gyrr]
self.nb_class = int(max(self.tlabel))
self.nb_people = int(max(self.plabel)) + 1
def sampling(self):
pass
def convert(self, data, mt, comb):
# need to update
drow, dcol = data.shape
input_shape = (int(mt * comb), int((dcol) / (mt * comb)))
if self.model_name in method_info['4columns']:
converted = data.reshape(-1, input_shape[0], input_shape[1], 1)
elif self.model_name == "pVGG":
data = data.reshape(-1, input_shape[0], input_shape[1])
converted = np.zeros((data.shape[0], data.shape[1], data.shape[2], 3))
for idx in range(3):
converted[:, :, :, idx] = data
elif self.model_name in method_info['3columns']:
converted = data.reshape(-1, input_shape[0], input_shape[1])
elif self.model_name in method_info['2columns']:
converted = data
elif self.model_name in method_info['specific']:
converted = data
elif self.model_name in method_info['vector']:
converted = data
return converted
class method_as_vector(BaseDivideProcess):
"""
convert vector method
"""
def __init__(self, mode, model_name, dataset, rsub):
super().__init__(mode, model_name, dataset)
print(f"{datetime.datetime.now()} :: Divide Process : {self.mode}")
def sampling(self):
total_dataset = dict()
total_dataset["train"] = list()
total_dataset["test"] = list()
return total_dataset | python | 52,446 |
import errno
import os
import stat
import subprocess
from jupyter_core.paths import jupyter_data_dir
from notebook.auth import passwd
# Jupyter provides get_config() in the config namespace; bind the config object
c = get_config()  # noqa - get_config is injected by Jupyter when loading this file
# Setup the Notebook to listen on all interfaces on port 8888 by default
c.NotebookApp.ip = '*'
c.NotebookApp.port = 8888
c.NotebookApp.open_browser = False
# Configure Networking while running under Marathon:
if 'MARATHON_APP_ID' in os.environ:
if 'PORT_JUPYTER' in os.environ:
c.NotebookApp.port = int(os.environ['PORT_JUPYTER'])
# Set the Access-Control-Allow-Origin header
c.NotebookApp.allow_origin = '*'
# Set Jupyter Notebook Server password to 'jupyter-<Marathon-App-Prefix>'
# e.g., Marathon App ID '/foo/bar/app' maps to password: 'jupyter-foo-bar'
MARATHON_APP_PREFIX = \
'-'.join(os.environ['MARATHON_APP_ID'].split('/')[:-1])
c.NotebookApp.password = passwd('jupyter{}'.format(MARATHON_APP_PREFIX))
# Allow CORS and TLS from behind Marathon-LB/HAProxy
# Trust X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For
# Necessary if the proxy handles SSL
if 'MARATHON_APP_LABEL_HAPROXY_GROUP' in os.environ:
c.NotebookApp.trust_xheaders = True
if 'MARATHON_APP_LABEL_HAPROXY_0_VHOST' in os.environ:
c.NotebookApp.allow_origin = \
'http://{}'.format(
os.environ['MARATHON_APP_LABEL_HAPROXY_0_VHOST']
)
if 'MARATHON_APP_LABEL_HAPROXY_0_REDIRECT_TO_HTTPS' in os.environ:
c.NotebookApp.allow_origin = \
'https://{}'.format(
os.environ['MARATHON_APP_LABEL_HAPROXY_0_VHOST']
)
# Set the Jupyter Notebook server base URL to the HAPROXY_PATH specified
if 'MARATHON_APP_LABEL_HAPROXY_0_PATH' in os.environ:
c.NotebookApp.base_url = \
os.environ['MARATHON_APP_LABEL_HAPROXY_0_PATH']
# Setup TLS
if 'USE_HTTPS' in os.environ:
SCHEDULER_TLS_CERT = os.environ.get('TLS_CERT_PATH', '/'.join(
[os.environ['MESOS_SANDBOX'],
'.ssl',
'scheduler.crt']))
c.NotebookApp.certfile = SCHEDULER_TLS_CERT
SCHEDULER_TLS_KEY = os.environ.get('TLS_KEY_PATH', '/'.join(
[os.environ['MESOS_SANDBOX'],
'.ssl',
'scheduler.key']))
c.NotebookApp.keyfile = SCHEDULER_TLS_KEY
# Set a certificate if USE_HTTPS is set to any value
PEM_FILE = os.path.join(jupyter_data_dir(), 'notebook.pem')
if 'USE_HTTPS' in os.environ:
if not os.path.isfile(PEM_FILE):
# Ensure PEM_FILE directory exists
DIR_NAME = os.path.dirname(PEM_FILE)
try:
os.makedirs(DIR_NAME)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(DIR_NAME):
pass
else:
raise
# Generate a certificate if one doesn't exist on disk
subprocess.check_call(['openssl', 'req', '-new', '-newkey', 'rsa:2048',
'-days', '365', '-nodes', '-x509', '-subj',
'/C=XX/ST=XX/L=XX/O=generated/CN=generated',
'-keyout', PEM_FILE, '-out', PEM_FILE])
# Restrict access to PEM_FILE
os.chmod(PEM_FILE, stat.S_IRUSR | stat.S_IWUSR)
c.NotebookApp.certfile = PEM_FILE
# Set a password if JUPYTER_PASSWORD is set
if 'JUPYTER_PASSWORD' in os.environ:
c.NotebookApp.password = passwd(os.environ['JUPYTER_PASSWORD'])
del os.environ['JUPYTER_PASSWORD']
| python | 3,490 |
from rest_framework.routers import DefaultRouter
from .views import ShopViewSet
from shops.models import Shop
from .serializers import ShopSerializer
from django.urls import path, include
from rest_framework.urlpatterns import format_suffix_patterns
app_name = "api"
router = DefaultRouter()
router.register("Shops", ShopViewSet, basename="shop")
#router.register("products", ProductViewSet)
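# Note: the router above is registered but its generated URLs are not included below;
# the explicit shop_list / shop_detail paths are wired into urlpatterns instead.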
shop_list = ShopViewSet.as_view({"get":"list", "post":"create"})
shop_detail = ShopViewSet.as_view({"get":"retrieve"})
urlpatterns = [
path("shops/", shop_list, name="shop-list"),
path("shops/<slug:slug>/", shop_detail, name="shop-detail"),
]
urlpatterns = format_suffix_patterns(urlpatterns, allowed=["json"])
| python | 720 |
# Generated by Django 3.1.7 on 2021-04-03 12:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0002_order_orderitem_review_shippingaddress'),
]
operations = [
migrations.AddField(
model_name='product',
name='image',
field=models.ImageField(blank=True, null=True, upload_to=''),
),
]
| python | 424 |
"""
Module contains tools for processing files into DataFrames or other objects
"""
from collections import abc, defaultdict
import csv
import datetime
from io import StringIO
import re
import sys
from textwrap import fill
from typing import Any, Dict, Set
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
import pandas._libs.parsers as parsers
from pandas._libs.parsers import STR_NA_VALUES
from pandas._libs.tslibs import parsing
from pandas._typing import FilePathOrBuffer
from pandas.errors import (
AbstractMethodError,
EmptyDataError,
ParserError,
ParserWarning,
)
from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
ensure_object,
ensure_str,
is_bool_dtype,
is_categorical_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_file_like,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms
from pandas.core.arrays import Categorical
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import (
Index,
MultiIndex,
RangeIndex,
ensure_index_from_sequences,
)
from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
from pandas.io.common import (
UTF8Recoder,
get_filepath_or_buffer,
get_handle,
infer_compression,
validate_header_arg,
)
from pandas.io.date_converters import generic_parser
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = "\ufeff"
_doc_read_csv_and_table = (
r"""
{summary}
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handler (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
Delimiter to use. If sep is None, the C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used and automatically detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header=None``. Explicitly pass ``header=0`` to be able to
replace existing names. The header can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header row,
then you should explicitly pass ``header=0`` to override the column names.
Duplicates in this list are not allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``DataFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force pandas to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a DataFrame from ``data`` with element order preserved use
``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
prefix : str, optional
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` together with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {{'c', 'python'}}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparseable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``pd.to_datetime`` after
``pd.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partially-applied
:func:`pandas.to_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : bool, default False
If True and `parse_dates` is enabled, pandas will attempt to infer the
format of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM format dates, international and European format.
cache_dates : bool, default True
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or getting chunks with
``get_chunk()``.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
lineterminator : str (length 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being
treated as the header.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
error_bad_lines : bool, default True
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will be dropped from the DataFrame that is
returned.
warn_bad_lines : bool, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single DataFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_map : bool, default False
If a filepath is provided for `filepath_or_buffer`, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are `None` for the ordinary converter,
`high` for the high-precision converter, and `round_trip` for the
round-trip converter.
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as a two-dimensional
data structure with labeled axes.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
>>> pd.{func_name}('data.csv') # doctest: +SKIP
"""
)
def _validate_integer(name, val, min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
"""
msg = f"'{name:s}' must be an integer >={min_val:d}"
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= min_val):
raise ValueError(msg)
return val
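# Illustrative sketch (not part of the original module, never called): shows
# how ``_validate_integer`` above treats floats that are exactly integral as
# valid while rejecting fractional values. The name ``_demo_validate_integer``
# is hypothetical.
def _demo_validate_integer():
    assert _validate_integer("nrows", 3.0) == 3       # lossless float -> int
    assert _validate_integer("nrows", None) is None   # None passes through
    try:
        _validate_integer("nrows", 3.5)               # fractional -> error
    except ValueError:
        pass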
def _validate_names(names):
"""
Raise ValueError if the `names` parameter contains duplicates.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output DataFrame.
Raises
------
ValueError
If names are not unique.
"""
if names is not None:
if len(names) != len(set(names)):
raise ValueError("Duplicate names are not allowed.")
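# Illustrative sketch (not part of the original module, never called): the
# duplicate check above compares the length of ``names`` against its set, so
# any repeated name raises. The name ``_demo_validate_names`` is hypothetical.
def _demo_validate_names():
    _validate_names(["a", "b", "c"])    # unique names pass silently
    _validate_names(None)               # None is accepted (no names given)
    try:
        _validate_names(["a", "a"])     # duplicates -> ValueError
    except ValueError:
        pass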
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
encoding = kwds.get("encoding", None)
if encoding is not None:
encoding = re.sub("_", "-", encoding).lower()
kwds["encoding"] = encoding
compression = kwds.get("compression", "infer")
compression = infer_compression(filepath_or_buffer, compression)
# TODO: get_filepath_or_buffer could return
# Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
# though mypy handling of conditional imports is difficult.
# See https://github.com/python/mypy/issues/1297
fp_or_buf, _, compression, should_close = get_filepath_or_buffer(
filepath_or_buffer, encoding, compression
)
kwds["compression"] = compression
if kwds.get("date_parser", None) is not None:
if isinstance(kwds["parse_dates"], bool):
kwds["parse_dates"] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get("iterator", False)
chunksize = _validate_integer("chunksize", kwds.get("chunksize", None), 1)
nrows = kwds.get("nrows", None)
# Check for duplicates in names.
_validate_names(kwds.get("names", None))
# Create the parser.
parser = TextFileReader(fp_or_buf, **kwds)
if chunksize or iterator:
return parser
try:
data = parser.read(nrows)
finally:
parser.close()
if should_close:
try:
fp_or_buf.close()
except ValueError:
pass
return data
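# Illustrative sketch (not part of the original module, never called): the
# branch above returns the TextFileReader itself when ``chunksize`` or
# ``iterator`` is requested, otherwise the parser reads eagerly and is closed.
# Assumes a pandas installation is importable; the helper name
# ``_demo_read_in_chunks`` is hypothetical.
def _demo_read_in_chunks():
    from io import StringIO
    import pandas as pd
    reader = pd.read_csv(StringIO("a,b\n1,2\n3,4\n5,6\n7,8\n"), chunksize=2)
    return [chunk for chunk in reader]   # two DataFrames of two rows each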
_parser_defaults = {
"delimiter": None,
"escapechar": None,
"quotechar": '"',
"quoting": csv.QUOTE_MINIMAL,
"doublequote": True,
"skipinitialspace": False,
"lineterminator": None,
"header": "infer",
"index_col": None,
"names": None,
"prefix": None,
"skiprows": None,
"skipfooter": 0,
"nrows": None,
"na_values": None,
"keep_default_na": True,
"true_values": None,
"false_values": None,
"converters": None,
"dtype": None,
"cache_dates": True,
"thousands": None,
"comment": None,
"decimal": ".",
# 'engine': 'c',
"parse_dates": False,
"keep_date_col": False,
"dayfirst": False,
"date_parser": None,
"usecols": None,
# 'iterator': False,
"chunksize": None,
"verbose": False,
"encoding": None,
"squeeze": False,
"compression": None,
"mangle_dupe_cols": True,
"infer_datetime_format": False,
"skip_blank_lines": True,
}
_c_parser_defaults = {
"delim_whitespace": False,
"na_filter": True,
"low_memory": True,
"memory_map": False,
"error_bad_lines": True,
"warn_bad_lines": True,
"float_precision": None,
}
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_deprecated_defaults: Dict[str, Any] = {}
_deprecated_args: Set[str] = set()
def _make_parser_function(name, default_sep=","):
def parser_f(
filepath_or_buffer: FilePathOrBuffer,
sep=default_sep,
delimiter=None,
# Column and Index Locations and Names
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_map=False,
float_precision=None,
):
# gh-23761
#
# When a dialect is passed, it overrides any of the overlapping
# parameters passed in directly. We don't want to warn if the
# default parameters were passed in (since it probably means
# that the user didn't pass them in explicitly in the first place).
#
# "delimiter" is the annoying corner case because we alias it to
# "sep" before doing comparison to the dialect values later on.
# Thus, we need a flag to indicate that we need to "override"
# the comparison to dialect values by checking if default values
# for BOTH "delimiter" and "sep" were provided.
if dialect is not None:
sep_override = delimiter is None and sep == default_sep
kwds = dict(sep_override=sep_override)
else:
kwds = dict()
# Alias sep -> delimiter.
if delimiter is None:
delimiter = sep
if delim_whitespace and delimiter != default_sep:
raise ValueError(
"Specified a delimiter with both sep and"
" delim_whitespace=True; you can only"
" specify one."
)
if engine is not None:
engine_specified = True
else:
engine = "c"
engine_specified = False
kwds.update(
delimiter=delimiter,
engine=engine,
dialect=dialect,
compression=compression,
engine_specified=engine_specified,
doublequote=doublequote,
escapechar=escapechar,
quotechar=quotechar,
quoting=quoting,
skipinitialspace=skipinitialspace,
lineterminator=lineterminator,
header=header,
index_col=index_col,
names=names,
prefix=prefix,
skiprows=skiprows,
skipfooter=skipfooter,
na_values=na_values,
true_values=true_values,
false_values=false_values,
keep_default_na=keep_default_na,
thousands=thousands,
comment=comment,
decimal=decimal,
parse_dates=parse_dates,
keep_date_col=keep_date_col,
dayfirst=dayfirst,
date_parser=date_parser,
cache_dates=cache_dates,
nrows=nrows,
iterator=iterator,
chunksize=chunksize,
converters=converters,
dtype=dtype,
usecols=usecols,
verbose=verbose,
encoding=encoding,
squeeze=squeeze,
memory_map=memory_map,
float_precision=float_precision,
na_filter=na_filter,
delim_whitespace=delim_whitespace,
warn_bad_lines=warn_bad_lines,
error_bad_lines=error_bad_lines,
low_memory=low_memory,
mangle_dupe_cols=mangle_dupe_cols,
infer_datetime_format=infer_datetime_format,
skip_blank_lines=skip_blank_lines,
)
return _read(filepath_or_buffer, kwds)
parser_f.__name__ = name
return parser_f
read_csv = _make_parser_function("read_csv", default_sep=",")
read_csv = Appender(
_doc_read_csv_and_table.format(
func_name="read_csv",
summary="Read a comma-separated values (csv) file into DataFrame.",
_default_sep="','",
)
)(read_csv)
read_table = _make_parser_function("read_table", default_sep="\t")
read_table = Appender(
_doc_read_csv_and_table.format(
func_name="read_table",
summary="Read general delimited file into DataFrame.",
_default_sep=r"'\\t' (tab-stop)",
)
)(read_table)
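# Illustrative sketch (not part of the original module, never called): both
# readers share ``parser_f``; only ``default_sep`` differs, so the two calls
# below parse equivalent content. The helper name ``_demo_default_separators``
# is hypothetical.
def _demo_default_separators():
    from io import StringIO
    df_comma = read_csv(StringIO("a,b\n1,2\n"))      # sep defaults to ','
    df_tab = read_table(StringIO("a\tb\n1\t2\n"))    # sep defaults to '\t'
    return df_comma, df_tab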
def read_fwf(
filepath_or_buffer: FilePathOrBuffer,
colspecs="infer",
widths=None,
infer_nrows=100,
**kwds,
):
r"""
Read a table of fixed-width formatted lines into DataFrame.
Also supports optionally iterating over the file or breaking it
into chunks.
Additional help can be found in the `online docs for IO Tools
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer', optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser determine the
`colspecs`.
.. versionadded:: 0.24.0
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or TextParser
A fixed-width formatted file is returned as a two-dimensional
data structure with labeled axes.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
>>> pd.read_fwf('data.csv') # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, "infer") and widths is not None:
raise ValueError("You must specify only one of 'widths' and 'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
kwds["colspecs"] = colspecs
kwds["infer_nrows"] = infer_nrows
kwds["engine"] = "python-fwf"
return _read(filepath_or_buffer, kwds)
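# Illustrative sketch (not part of the original module, never called): mirrors
# the widths-to-colspecs conversion above, turning field widths into
# contiguous half-open intervals. The helper name is hypothetical.
def _demo_widths_to_colspecs(widths=(2, 3, 4)):
    colspecs, col = [], 0
    for w in widths:
        colspecs.append((col, col + w))
        col += w
    return colspecs   # (2, 3, 4) -> [(0, 2), (2, 5), (5, 9)]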
class TextFileReader(abc.Iterator):
"""
Iterator wrapper around a parser engine. A passed dialect overrides any of the related parser options.
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = "python"
engine_specified = False
self._engine_specified = kwds.get("engine_specified", engine_specified)
if kwds.get("dialect") is not None:
dialect = kwds["dialect"]
if dialect in csv.list_dialects():
dialect = csv.get_dialect(dialect)
# Any valid dialect should have these attributes.
# If any are missing, we will raise automatically.
for param in (
"delimiter",
"doublequote",
"escapechar",
"skipinitialspace",
"quotechar",
"quoting",
):
try:
dialect_val = getattr(dialect, param)
except AttributeError:
raise ValueError(f"Invalid dialect {kwds['dialect']} provided")
parser_default = _parser_defaults[param]
provided = kwds.get(param, parser_default)
# Messages for conflicting values between the dialect
# instance and the actual parameters provided.
conflict_msgs = []
# Don't warn if the default parameter was passed in,
# even if it conflicts with the dialect (gh-23761).
if provided != parser_default and provided != dialect_val:
msg = (
f"Conflicting values for '{param}': '{provided}' was "
f"provided, but the dialect specifies '{dialect_val}'. "
"Using the dialect-specified value."
)
# Annoying corner case for not warning about
# conflicts between dialect and delimiter parameter.
# Refer to parser_f in _make_parser_function, where sep_override is set.
if not (param == "delimiter" and kwds.pop("sep_override", False)):
conflict_msgs.append(msg)
if conflict_msgs:
warnings.warn(
"\n\n".join(conflict_msgs), ParserWarning, stacklevel=2
)
kwds[param] = dialect_val
if kwds.get("skipfooter"):
if kwds.get("iterator") or kwds.get("chunksize"):
raise ValueError("'skipfooter' not supported for 'iteration'")
if kwds.get("nrows"):
raise ValueError("'skipfooter' not supported with 'nrows'")
if kwds.get("header", "infer") == "infer":
kwds["header"] = 0 if kwds.get("names") is None else None
self.orig_options = kwds
# miscellanea
self.engine = engine
self._engine = None
self._currow = 0
options = self._get_options_with_defaults(engine)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
self.squeeze = options.pop("squeeze", False)
# might mutate self.engine
self.engine = self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if "has_index_names" in kwds:
self.options["has_index_names"] = kwds["has_index_names"]
self._make_engine(self.engine)
def close(self):
self._engine.close()
def _get_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in _parser_defaults.items():
value = kwds.get(argname, default)
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
if engine != "c" and value != default:
if "python" in engine and argname not in _python_unsupported:
pass
elif value == _deprecated_defaults.get(argname, default):
pass
else:
raise ValueError(
f"The {repr(argname)} option is not supported with the"
f" {repr(engine)} engine"
)
else:
value = _deprecated_defaults.get(argname, default)
options[argname] = value
if engine == "python-fwf":
for argname, default in _fwf_defaults.items():
options[argname] = kwds.get(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f):
next_attr = "__next__"
# The C engine doesn't need the file-like to have the "next" or
# "__next__" attribute. However, the Python engine explicitly calls
# "next(...)" when iterating through such an object, meaning it
# needs to have that attribute ("next" for Python 2.x, "__next__"
# for Python 3.x)
if engine != "c" and not hasattr(f, next_attr):
msg = "The 'python' engine cannot iterate through this file buffer."
raise ValueError(msg)
return engine
def _clean_options(self, options, engine):
result = options.copy()
engine_specified = self._engine_specified
fallback_reason = None
sep = options["delimiter"]
delim_whitespace = options["delim_whitespace"]
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
fallback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
encoding = sys.getfilesystemencoding() or "utf-8"
if sep is None and not delim_whitespace:
if engine == "c":
fallback_reason = (
"the 'c' engine does not support"
" sep=None with delim_whitespace=False"
)
engine = "python"
elif sep is not None and len(sep) > 1:
if engine == "c" and sep == r"\s+":
result["delim_whitespace"] = True
del result["delimiter"]
elif engine not in ("python", "python-fwf"):
# wait until regex engine integrated
fallback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
r"different from '\s+' are "
"interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
if "python" in engine:
result["delimiter"] = r"\s+"
elif sep is not None:
encodeable = True
try:
if len(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ("python", "python-fwf"):
fallback_reason = (
f"the separator encoded in {encoding} "
"is > 1 char long, and the 'c' engine "
"does not support such separators"
)
engine = "python"
quotechar = options["quotechar"]
if quotechar is not None and isinstance(quotechar, (str, bytes)):
if (
len(quotechar) == 1
and ord(quotechar) > 127
and engine not in ("python", "python-fwf")
):
fallback_reason = (
"ord(quotechar) > 127, meaning the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support "
"such quotechars"
)
engine = "python"
if fallback_reason and engine_specified:
raise ValueError(fallback_reason)
if engine == "c":
for arg in _c_unsupported:
del result[arg]
if "python" in engine:
for arg in _python_unsupported:
if fallback_reason and result[arg] != _c_parser_defaults[arg]:
raise ValueError(
"Falling back to the 'python' engine because "
f"{fallback_reason}, but this causes {repr(arg)} to be "
"ignored as it is not supported by the 'python' engine."
)
del result[arg]
if fallback_reason:
warnings.warn(
(
"Falling back to the 'python' engine because "
f"{fallback_reason}; you can avoid this warning by specifying "
"engine='python'."
),
ParserWarning,
stacklevel=5,
)
index_col = options["index_col"]
names = options["names"]
converters = options["converters"]
na_values = options["na_values"]
skiprows = options["skiprows"]
validate_header_arg(options["header"])
depr_warning = ""
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
msg = (
f"The {repr(arg)} argument has been deprecated and will be "
"removed in a future version."
)
if result.get(arg, depr_default) != depr_default:
depr_warning += msg + "\n\n"
else:
result[arg] = parser_default
if depr_warning != "":
warnings.warn(depr_warning, FutureWarning, stacklevel=2)
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not isinstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result["index_col"] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not isinstance(converters, dict):
raise TypeError(
"Type converters must be a dict or subclass, "
f"input was a {type(converters).__name__}"
)
else:
converters = {}
# Converting values to NA
keep_default_na = options["keep_default_na"]
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is internally handled by the
# c-engine, so only need for python parsers
if engine != "c":
if is_integer(skiprows):
skiprows = list(range(skiprows))
if skiprows is None:
skiprows = set()
elif not callable(skiprows):
skiprows = set(skiprows)
# put stuff back
result["names"] = names
result["converters"] = converters
result["na_values"] = na_values
result["na_fvalues"] = na_fvalues
result["skiprows"] = skiprows
return result, engine
def __next__(self):
try:
return self.get_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine="c"):
if engine == "c":
self._engine = CParserWrapper(self.f, **self.options)
else:
if engine == "python":
klass = PythonParser
elif engine == "python-fwf":
klass = FixedWidthFieldParser
else:
raise ValueError(
f"Unknown engine: {engine} (valid options are"
' "c", "python", or'
' "python-fwf")'
)
self._engine = klass(self.f, **self.options)
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
nrows = _validate_integer("nrows", nrows)
ret = self._engine.read(nrows)
# May alter columns / col_dict
index, columns, col_dict = self._create_index(ret)
if index is None:
if col_dict:
# Any column is actually fine:
new_rows = len(next(iter(col_dict.values())))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = len(index)
df = DataFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and len(df.columns) == 1:
return df[df.columns[0]].copy()
return df
def _create_index(self, ret):
index, columns, col_dict = ret
return index, columns, col_dict
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = min(size, self.nrows - self._currow)
return self.read(nrows=size)
def _is_index_col(col):
return col is not None and col is not False
def _is_potential_multi_index(columns):
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
Returns
-------
boolean : Whether or not columns could become a MultiIndex
"""
return (
len(columns)
and not isinstance(columns, MultiIndex)
and all(isinstance(c, tuple) for c in columns)
)
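# Illustrative sketch (not part of the original module, never called): only a
# non-empty sequence made entirely of tuples (and not already a MultiIndex) is
# treated as MultiIndex material. The helper name is hypothetical.
def _demo_potential_multi_index():
    assert _is_potential_multi_index([("a", 1), ("a", 2)])
    assert not _is_potential_multi_index(["a", "b"])
    assert not _is_potential_multi_index([])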
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
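# Illustrative sketch (not part of the original module, never called): a
# callable ``usecols`` is mapped over the column names and collapsed to a set
# of positional indices, while list-likes pass through untouched. The helper
# name is hypothetical.
def _demo_evaluate_usecols():
    names = ["foo", "bar", "baz"]
    assert _evaluate_usecols(lambda name: name.startswith("b"), names) == {1, 2}
    assert _evaluate_usecols(["foo", "baz"], names) == ["foo", "baz"]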
def _validate_usecols_names(usecols, names):
"""
Validates that all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if len(missing) > 0:
raise ValueError(
"Usecols do not match columns, "
f"columns expected but not found: {missing}"
)
return usecols
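# Illustrative sketch (not part of the original module, never called): any
# requested column that is absent from the parsed names is reported in the
# ValueError raised above. The helper name is hypothetical.
def _demo_validate_usecols_names():
    assert _validate_usecols_names(["a", "b"], ["a", "b", "c"]) == ["a", "b"]
    try:
        _validate_usecols_names(["a", "z"], ["a", "b", "c"])   # 'z' missing
    except ValueError:
        pass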
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
return skipfooter
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
'usecols_dtype' is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
"""
msg = (
"'usecols' must either be list-like of all strings, all unicode, "
"all integers or a callable."
)
if usecols is not None:
if callable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
# Ensure it is iterable container but not string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string", "unicode"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
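# Illustrative sketch (not part of the original module, never called):
# list-likes come back as a set plus their inferred dtype, while callables
# pass through untouched. The exact dtype string depends on
# ``lib.infer_dtype``; 'string' is what it reports for plain str values.
def _demo_validate_usecols_arg():
    cols, dtype = _validate_usecols_arg(["a", "b"])
    assert cols == {"a", "b"} and dtype == "string"
    func, dtype = _validate_usecols_arg(lambda c: c != "a")
    assert callable(func) and dtype is None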
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a TypeError if
that is the case.
"""
msg = (
"Only booleans, lists, and "
"dictionaries are accepted "
"for the 'parse_dates' parameter"
)
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
class ParserBase:
def __init__(self, kwds):
self.names = kwds.get("names")
self.orig_names = None
self.prefix = kwds.pop("prefix", None)
self.index_col = kwds.get("index_col", None)
self.unnamed_cols = set()
self.index_names = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
self.date_parser = kwds.pop("date_parser", None)
self.dayfirst = kwds.pop("dayfirst", False)
self.keep_date_col = kwds.pop("keep_date_col", False)
self.na_values = kwds.get("na_values")
self.na_fvalues = kwds.get("na_fvalues")
self.na_filter = kwds.get("na_filter", False)
self.keep_default_na = kwds.get("keep_default_na", True)
self.true_values = kwds.get("true_values")
self.false_values = kwds.get("false_values")
self.mangle_dupe_cols = kwds.get("mangle_dupe_cols", True)
self.infer_datetime_format = kwds.pop("infer_datetime_format", False)
self.cache_dates = kwds.pop("cache_dates", True)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_format=self.infer_datetime_format,
cache_dates=self.cache_dates,
)
# validate header options for mi
self.header = kwds.get("header")
if isinstance(self.header, (list, tuple, np.ndarray)):
if not all(map(is_integer, self.header)):
raise ValueError("header must be integer or list of integers")
if any(i < 0 for i in self.header):
raise ValueError(
"cannot specify multi-index header with negative integers"
)
if kwds.get("usecols"):
raise ValueError(
"cannot specify usecols when specifying a multi-index header"
)
if kwds.get("names"):
raise ValueError(
"cannot specify names when specifying a multi-index header"
)
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = isinstance(self.index_col, (list, tuple, np.ndarray))
if not (
is_sequence
and all(map(is_integer, self.index_col))
or is_integer(self.index_col)
):
raise ValueError(
"index_col must only contain row numbers "
"when specifying a multi-index header"
)
# GH 16338
elif self.header is not None and not is_integer(self.header):
raise ValueError("header must be integer or list of integers")
# GH 27779
elif self.header is not None and self.header < 0:
raise ValueError(
"Passing negative integer to header is invalid. "
"For no header, use header=None instead"
)
self._name_processed = False
self._first_chunk = True
# GH 13932
# keep references to file handles opened by the parser itself
self.handles = []
def close(self):
for f in self.handles:
f.close()
@property
def _has_complex_date_col(self):
return isinstance(self.parse_dates, dict) or (
isinstance(self.parse_dates, list)
and len(self.parse_dates) > 0
and isinstance(self.parse_dates[0], list)
)
def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = self.index_col[i]
if is_scalar(self.parse_dates):
return (j == self.parse_dates) or (
name is not None and name == self.parse_dates
)
else:
return (j in self.parse_dates) or (
name is not None and name in self.parse_dates
)
def _extract_multi_indexer_columns(
self, header, index_names, col_names, passed_names=False
):
""" extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers """
if len(header) < 2:
return header[0], index_names, col_names, passed_names
# the names are the tuples of the header that are not the index cols
# 0 is the name of the index, assuming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not isinstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header.pop(-1)
index_names, names, index_col = _clean_index_names(
index_names, self.index_col, self.unnamed_cols
)
# extract the columns
field_count = len(header[0])
def extract(r):
return tuple(r[i] for i in range(field_count) if i not in sic)
columns = list(zip(*(extract(r) for r in header)))
names = ic + columns
# If we find unnamed columns all in a single
# level, then our header was too long.
for n in range(len(columns[0])):
if all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
raise ParserError(
"Passed header=[{header}] are too many rows for this "
"multi_index of columns".format(
header=",".join(str(x) for x in self.header)
)
)
# Clean the column names (if we have an index_col).
if len(ic):
col_names = [
r[0] if (len(r[0]) and r[0] not in self.unnamed_cols) else None
for r in header
]
else:
col_names = [None] * len(header)
passed_names = True
return names, index_names, col_names, passed_names
def _maybe_dedup_names(self, names):
# see gh-7160 and gh-9424: this helps to provide
# immediate alleviation of the duplicate names
# issue and appears to be satisfactory to users,
# but ultimately, not needing to butcher the names
# would be nice!
if self.mangle_dupe_cols:
names = list(names) # so we can index
counts = defaultdict(int)
is_potential_mi = _is_potential_multi_index(names)
for i, col in enumerate(names):
cur_count = counts[col]
while cur_count > 0:
counts[col] = cur_count + 1
if is_potential_mi:
col = col[:-1] + (f"{col[-1]}.{cur_count}",)
else:
col = f"{col}.{cur_count}"
cur_count = counts[col]
names[i] = col
counts[col] = cur_count + 1
return names
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if _is_potential_multi_index(columns):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or not self.index_col:
index = None
elif not self._has_complex_date_col:
index = self._get_simple_index(alldata, columns)
index = self._agg_index(index)
elif self._has_complex_date_col:
if not self._name_processed:
(self.index_names, _, self.index_col) = _clean_index_names(
list(columns), self.index_col, self.unnamed_cols
)
self._name_processed = True
index = self._get_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
# add names for the index
if indexnamerow:
coffset = len(indexnamerow) - len(columns)
index = index.set_names(indexnamerow[:coffset])
# maybe create a mi on the columns
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns
_implicit_index = False
def _get_simple_index(self, data, columns):
def ix(col):
if not isinstance(col, str):
return col
raise ValueError(f"Index {col} invalid")
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.append(i)
index.append(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in sorted(to_remove, reverse=True):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
def _get_complex_date_index(self, data, col_names):
def _get_name(icol):
if isinstance(icol, str):
return icol
if col_names is None:
raise ValueError(f"Must supply column order to use {icol!s} as index")
for i, c in enumerate(col_names):
if i == icol:
return c
to_remove = []
index = []
for idx in self.index_col:
name = _get_name(idx)
to_remove.append(name)
index.append(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in sorted(to_remove, reverse=True):
data.pop(c)
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True):
arrays = []
for i, arr in enumerate(index):
if try_parse_dates and self._should_parse_dates(i):
arr = self._date_conv(arr)
if self.na_filter:
col_na_values = self.na_values
col_na_fvalues = self.na_fvalues
else:
col_na_values = set()
col_na_fvalues = set()
if isinstance(self.na_values, dict):
col_name = self.index_names[i]
if col_name is not None:
col_na_values, col_na_fvalues = _get_na_values(
col_name, self.na_values, self.na_fvalues, self.keep_default_na
)
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)
names = self.index_names
index = ensure_index_from_sequences(arrays, names)
return index
def _convert_to_ndarrays(
self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None
):
result = {}
for c, values in dct.items():
conv_f = None if converters is None else converters.get(c, None)
if isinstance(dtypes, dict):
cast_type = dtypes.get(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _get_na_values(
c, na_values, na_fvalues, self.keep_default_na
)
else:
col_na_values, col_na_fvalues = set(), set()
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(
(
"Both a converter and dtype were specified "
f"for column {c} - only the converter will "
"be used"
),
ParserWarning,
stacklevel=7,
)
try:
values = lib.map_infer(values, conv_f)
except ValueError:
mask = algorithms.isin(values, list(na_values)).view(np.uint8)
values = lib.map_infer_mask(values, conv_f, mask)
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues, try_num_bool=False
)
else:
is_str_or_ea_dtype = is_string_dtype(
cast_type
) or is_extension_array_dtype(cast_type)
# skip inference if specified dtype is object
# or casting to an EA
try_num_bool = not (cast_type and is_str_or_ea_dtype)
# general type inference and conversion
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues, try_num_bool
)
# type specified in dtype param or cast_type is an EA
if cast_type and (
not is_dtype_equal(cvals, cast_type)
or is_extension_array_dtype(cast_type)
):
try:
if (
is_bool_dtype(cast_type)
and not is_categorical_dtype(cast_type)
and na_count > 0
):
raise ValueError(f"Bool column has NA values in column {c}")
except (AttributeError, TypeError):
# invalid input to is_bool_dtype
pass
cvals = self._cast_types(cvals, cast_type, c)
result[c] = cvals
if verbose and na_count:
print(f"Filled {na_count} NA values in column {c!s}")
return result
def _infer_types(self, values, na_values, try_num_bool=True):
"""
Infer types of values, possibly casting
Parameters
----------
values : ndarray
na_values : set
try_num_bool : bool, default True
try to cast values to numeric (first preference) or boolean
Returns
-------
converted : ndarray
na_count : int
"""
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = algorithms.isin(values, list(na_values))
na_count = mask.sum()
if na_count > 0:
if is_integer_dtype(values):
values = values.astype(np.float64)
np.putmask(values, mask, np.nan)
return values, na_count
if try_num_bool and is_object_dtype(values.dtype):
# exclude e.g. DatetimeIndex here
try:
result = lib.maybe_convert_numeric(values, na_values, False)
except (ValueError, TypeError):
# e.g. encountering datetime string gets ValueError
# TypeError can be raised in floatify
result = values
na_count = parsers.sanitize_objects(result, na_values, False)
else:
na_count = isna(result).sum()
else:
result = values
if values.dtype == np.object_:
na_count = parsers.sanitize_objects(values, na_values, False)
if result.dtype == np.object_ and try_num_bool:
result = libops.maybe_convert_bool(
np.asarray(values),
true_values=self.true_values,
false_values=self.false_values,
)
return result, na_count
def _cast_types(self, values, cast_type, column):
"""
Cast values to specified type
Parameters
----------
values : ndarray
cast_type : string or np.dtype
dtype to cast values to
column : string
column name - used only for error reporting
Returns
-------
converted : ndarray
"""
if is_categorical_dtype(cast_type):
known_cats = (
isinstance(cast_type, CategoricalDtype)
and cast_type.categories is not None
)
if not is_object_dtype(values) and not known_cats:
# XXX this is for consistency with
# c-parser which parses all categories
# as strings
values = astype_nansafe(values, str)
cats = Index(values).unique().dropna()
values = Categorical._from_inferred_categories(
cats, cats.get_indexer(values), cast_type, true_values=self.true_values
)
# use the EA's implementation of casting
elif is_extension_array_dtype(cast_type):
# ensure cast_type is an actual dtype and not a string
cast_type = pandas_dtype(cast_type)
array_type = cast_type.construct_array_type()
try:
return array_type._from_sequence_of_strings(values, dtype=cast_type)
except NotImplementedError:
raise NotImplementedError(
f"Extension Array: {array_type} must implement "
"_from_sequence_of_strings in order "
"to be used in parser methods"
)
else:
try:
values = astype_nansafe(values, cast_type, copy=True, skipna=True)
except ValueError:
raise ValueError(
f"Unable to convert column {column} to type {cast_type}"
)
return values
def _do_date_conversions(self, names, data):
# returns data, columns
if self.parse_dates is not None:
data, names = _process_date_conversion(
data,
self._date_conv,
self.parse_dates,
self.index_col,
self.index_names,
names,
keep_date_col=self.keep_date_col,
)
return names, data
class CParserWrapper(ParserBase):
"""
"""
def __init__(self, src, **kwds):
self.kwds = kwds
kwds = kwds.copy()
ParserBase.__init__(self, kwds)
if kwds.get("compression") is None and "utf-16" in (kwds.get("encoding") or ""):
# if source is utf-16 plain text, convert source to utf-8
if isinstance(src, str):
src = open(src, "rb")
self.handles.append(src)
src = UTF8Recoder(src, kwds["encoding"])
kwds["encoding"] = "utf-8"
# #2442
kwds["allow_leading_cols"] = self.index_col is not False
# GH20529, validate usecol arg before TextReader
self.usecols, self.usecols_dtype = _validate_usecols_arg(kwds["usecols"])
kwds["usecols"] = self.usecols
self._reader = parsers.TextReader(src, **kwds)
self.unnamed_cols = self._reader.unnamed_cols
passed_names = self.names is None
if self._reader.header is None:
self.names = None
else:
if len(self._reader.header) > 1:
# we have a multi index in the columns
(
self.names,
self.index_names,
self.col_names,
passed_names,
) = self._extract_multi_indexer_columns(
self._reader.header, self.index_names, self.col_names, passed_names
)
else:
self.names = list(self._reader.header[0])
if self.names is None:
if self.prefix:
self.names = [
f"{self.prefix}{i}" for i in range(self._reader.table_width)
]
else:
self.names = list(range(self._reader.table_width))
# gh-9755
#
# need to set orig_names here first
# so that proper indexing can be done
# with _set_noconvert_columns
#
# once names has been filtered, we will
# then set orig_names again to names
self.orig_names = self.names[:]
if self.usecols:
usecols = _evaluate_usecols(self.usecols, self.orig_names)
# GH 14671
if self.usecols_dtype == "string" and not set(usecols).issubset(
self.orig_names
):
_validate_usecols_names(usecols, self.orig_names)
if len(self.names) > len(usecols):
self.names = [
n
for i, n in enumerate(self.names)
if (i in usecols or n in usecols)
]
if len(self.names) < len(usecols):
_validate_usecols_names(usecols, self.names)
self._set_noconvert_columns()
self.orig_names = self.names
if not self._has_complex_date_col:
if self._reader.leading_cols == 0 and _is_index_col(self.index_col):
self._name_processed = True
(index_names, self.names, self.index_col) = _clean_index_names(
self.names, self.index_col, self.unnamed_cols
)
if self.index_names is None:
self.index_names = index_names
if self._reader.header is None and not passed_names:
self.index_names = [None] * len(self.index_names)
self._implicit_index = self._reader.leading_cols > 0
def close(self):
for f in self.handles:
f.close()
# close additional handles opened by C parser (for compression)
try:
self._reader.close()
except ValueError:
pass
def _set_noconvert_columns(self):
"""
Set the columns that should not undergo dtype conversions.
Currently, any column that is involved with date parsing will not
undergo such conversions.
"""
names = self.orig_names
if self.usecols_dtype == "integer":
# A set of integers will be converted to a list in
# the correct order every single time.
usecols = list(self.usecols)
usecols.sort()
elif callable(self.usecols) or self.usecols_dtype not in ("empty", None):
# The names attribute should have the correct columns
# in the proper order for indexing with parse_dates.
usecols = self.names[:]
else:
# Usecols is empty.
usecols = None
def _set(x):
if usecols is not None and is_integer(x):
x = usecols[x]
if not is_integer(x):
x = names.index(x)
self._reader.set_noconvert(x)
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif self.parse_dates:
if isinstance(self.index_col, list):
for k in self.index_col:
_set(k)
elif self.index_col is not None:
_set(self.index_col)
def set_error_bad_lines(self, status):
self._reader.set_error_bad_lines(int(status))
def read(self, nrows=None):
try:
data = self._reader.read(nrows)
except StopIteration:
if self._first_chunk:
self._first_chunk = False
names = self._maybe_dedup_names(self.orig_names)
index, columns, col_dict = _get_empty_meta(
names,
self.index_col,
self.index_names,
dtype=self.kwds.get("dtype"),
)
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
if self.usecols is not None:
columns = self._filter_usecols(columns)
col_dict = dict(
filter(lambda item: item[0] in columns, col_dict.items())
)
return index, columns, col_dict
else:
raise
# Done with first read, next time raise StopIteration
self._first_chunk = False
names = self.names
if self._reader.leading_cols:
if self._has_complex_date_col:
raise NotImplementedError("file structure not yet supported")
# implicit index, no index names
arrays = []
for i in range(self._reader.leading_cols):
if self.index_col is None:
values = data.pop(i)
else:
values = data.pop(self.index_col[i])
values = self._maybe_parse_dates(values, i, try_parse_dates=True)
arrays.append(values)
index = ensure_index_from_sequences(arrays)
if self.usecols is not None:
names = self._filter_usecols(names)
names = self._maybe_dedup_names(names)
# rename dict keys
data = sorted(data.items())
data = {k: v for k, (i, v) in zip(names, data)}
names, data = self._do_date_conversions(names, data)
else:
# rename dict keys
data = sorted(data.items())
# ugh, mutation
names = list(self.orig_names)
names = self._maybe_dedup_names(names)
if self.usecols is not None:
names = self._filter_usecols(names)
# columns as list
alldata = [x[1] for x in data]
data = {k: v for k, (i, v) in zip(names, data)}
names, data = self._do_date_conversions(names, data)
index, names = self._make_index(data, alldata, names)
# maybe create a mi on the columns
names = self._maybe_make_multi_index_columns(names, self.col_names)
return index, names, data
def _filter_usecols(self, names):
# hackish
usecols = _evaluate_usecols(self.usecols, names)
if usecols is not None and len(names) != len(usecols):
names = [
name for i, name in enumerate(names) if i in usecols or name in usecols
]
return names
def _get_index_names(self):
names = list(self._reader.header[0])
idx_names = None
if self._reader.leading_cols == 0 and self.index_col is not None:
(idx_names, names, self.index_col) = _clean_index_names(
names, self.index_col, self.unnamed_cols
)
return names, idx_names
def _maybe_parse_dates(self, values, index, try_parse_dates=True):
if try_parse_dates and self._should_parse_dates(index):
values = self._date_conv(values)
return values
def TextParser(*args, **kwds):
"""
Converts lists of lists/tuples into DataFrames with proper type inference
and optional (e.g. string to datetime) conversion. Also enables iterating
lazily over chunks of large files
Parameters
----------
data : file-like object or list
delimiter : separator character to use
dialect : str or csv.Dialect instance, optional
Ignored if delimiter is longer than 1 character
names : sequence, optional
List of column names to use.
header : int, default 0
Row to use to parse column labels. Defaults to the first row. Prior
rows will be discarded.
index_col : int or list, optional
Column or columns to use as the (possibly hierarchical) index
has_index_names: bool, default False
True if the cols defined in index_col have an index name and are
not in the header.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN.
keep_default_na : bool, default True
thousands : str, optional
Thousands separator
comment : str, optional
Comment out remainder of line
parse_dates : bool, default False
keep_date_col : bool, default False
date_parser : function, optional
skiprows : list of integers
Row numbers to skip
skipfooter : int
Number of lines at the bottom of the file to skip.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8')
squeeze : bool, default False
Return a Series if the parsed data only contains one column.
infer_datetime_format : bool, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are None for the ordinary converter,
'high' for the high-precision converter, and 'round_trip' for the
round-trip converter.
"""
kwds["engine"] = "python"
return TextFileReader(*args, **kwds)
def count_empty_vals(vals):
return sum(1 for v in vals if v == "" or v is None)
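# Illustrative sketch (not part of the original module, never called):
# ``TextParser`` above wraps already-split rows (lists of lists) in a
# python-engine ``TextFileReader``, so in-memory records go through the same
# type-inference machinery as files. The helper name is hypothetical.
def _demo_text_parser():
    rows = [["a", "b"], ["1", "2"], ["3", "4"]]
    parser = TextParser(rows, header=0)
    return parser.read()   # DataFrame with numeric columns 'a' and 'b'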
class PythonParser(ParserBase):
def __init__(self, f, **kwds):
"""
Workhorse function for processing a nested list into a DataFrame.
"""
ParserBase.__init__(self, kwds)
self.data = None
self.buf = []
self.pos = 0
self.line_pos = 0
self.encoding = kwds["encoding"]
self.compression = kwds["compression"]
self.memory_map = kwds["memory_map"]
self.skiprows = kwds["skiprows"]
if callable(self.skiprows):
self.skipfunc = self.skiprows
else:
self.skipfunc = lambda x: x in self.skiprows
self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"])
self.delimiter = kwds["delimiter"]
self.quotechar = kwds["quotechar"]
if isinstance(self.quotechar, str):
self.quotechar = str(self.quotechar)
self.escapechar = kwds["escapechar"]
self.doublequote = kwds["doublequote"]
self.skipinitialspace = kwds["skipinitialspace"]
self.lineterminator = kwds["lineterminator"]
self.quoting = kwds["quoting"]
self.usecols, _ = _validate_usecols_arg(kwds["usecols"])
self.skip_blank_lines = kwds["skip_blank_lines"]
self.warn_bad_lines = kwds["warn_bad_lines"]
self.error_bad_lines = kwds["error_bad_lines"]
self.names_passed = kwds["names"] or None
self.has_index_names = False
if "has_index_names" in kwds:
self.has_index_names = kwds["has_index_names"]
self.verbose = kwds["verbose"]
self.converters = kwds["converters"]
self.dtype = kwds["dtype"]
self.thousands = kwds["thousands"]
self.decimal = kwds["decimal"]
self.comment = kwds["comment"]
self._comment_lines = []
f, handles = get_handle(
f,
"r",
encoding=self.encoding,
compression=self.compression,
memory_map=self.memory_map,
)
self.handles.extend(handles)
# Set self.data to something that can read lines.
if hasattr(f, "readline"):
self._make_reader(f)
else:
self.data = f
# Get columns in two steps: infer from data, then
# infer column indices from self.usecols if it is specified.
self._col_indices = None
(
self.columns,
self.num_original_columns,
self.unnamed_cols,
) = self._infer_columns()
# Now self.columns has the set of columns that we will process.
# The original set is stored in self.original_columns.
if len(self.columns) > 1:
# we are processing a multi index column
(
self.columns,
self.index_names,
self.col_names,
_,
) = self._extract_multi_indexer_columns(
self.columns, self.index_names, self.col_names
)
# Update list of original names to include all indices.
self.num_original_columns = len(self.columns)
else:
self.columns = self.columns[0]
# get popped off for index
self.orig_names = list(self.columns)
# needs to be cleaned/refactored
# multiple date column thing turning into a real spaghetti factory
if not self._has_complex_date_col:
(index_names, self.orig_names, self.columns) = self._get_index_name(
self.columns
)
self._name_processed = True
if self.index_names is None:
self.index_names = index_names
if self.parse_dates:
self._no_thousands_columns = self._set_no_thousands_columns()
else:
self._no_thousands_columns = None
if len(self.decimal) != 1:
raise ValueError("Only length-1 decimal markers supported")
if self.thousands is None:
self.nonnum = re.compile(fr"[^-^0-9^{self.decimal}]+")
else:
self.nonnum = re.compile(fr"[^-^0-9^{self.thousands}^{self.decimal}]+")
def _set_no_thousands_columns(self):
# Create a set of column ids that are not to be stripped of thousands
# separators.
noconvert_columns = set()
def _set(x):
if is_integer(x):
noconvert_columns.add(x)
else:
noconvert_columns.add(self.columns.index(x))
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif self.parse_dates:
if isinstance(self.index_col, list):
for k in self.index_col:
_set(k)
elif self.index_col is not None:
_set(self.index_col)
return noconvert_columns
def _make_reader(self, f):
sep = self.delimiter
if sep is None or len(sep) == 1:
if self.lineterminator:
raise ValueError(
"Custom line terminators not supported in python parser (yet)"
)
class MyDialect(csv.Dialect):
delimiter = self.delimiter
quotechar = self.quotechar
escapechar = self.escapechar
doublequote = self.doublequote
skipinitialspace = self.skipinitialspace
quoting = self.quoting
lineterminator = "\n"
dia = MyDialect
sniff_sep = True
if sep is not None:
sniff_sep = False
dia.delimiter = sep
# attempt to sniff the delimiter
if sniff_sep:
line = f.readline()
while self.skipfunc(self.pos):
self.pos += 1
line = f.readline()
line = self._check_comments([line])[0]
self.pos += 1
self.line_pos += 1
sniffed = csv.Sniffer().sniff(line)
dia.delimiter = sniffed.delimiter
# Note: self.encoding is irrelevant here
line_rdr = csv.reader(StringIO(line), dialect=dia)
self.buf.extend(list(line_rdr))
# Note: self.encoding is irrelevant here
reader = csv.reader(f, dialect=dia, strict=True)
else:
def _read():
line = f.readline()
pat = re.compile(sep)
yield pat.split(line.strip())
for line in f:
yield pat.split(line.strip())
reader = _read()
self.data = reader
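    # Illustrative note (not part of the original pandas source): when no
    # separator is given (sep is None), the delimiter is inferred from the
    # first non-skipped line with csv.Sniffer, e.g. (sample data is
    # hypothetical):
    #
    #     >>> import csv
    #     >>> csv.Sniffer().sniff("a;b;c\n1;2;3\n").delimiter
    #     ';'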
def read(self, rows=None):
try:
content = self._get_lines(rows)
except StopIteration:
if self._first_chunk:
content = []
else:
raise
# done with first read, next time raise StopIteration
self._first_chunk = False
columns = list(self.orig_names)
if not len(content): # pragma: no cover
# DataFrame with the right metadata, even though it's length 0
names = self._maybe_dedup_names(self.orig_names)
index, columns, col_dict = _get_empty_meta(
names, self.index_col, self.index_names, self.dtype
)
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns, col_dict
# handle new style for names in index
count_empty_content_vals = count_empty_vals(content[0])
indexnamerow = None
if self.has_index_names and count_empty_content_vals == len(columns):
indexnamerow = content[0]
content = content[1:]
alldata = self._rows_to_cols(content)
data = self._exclude_implicit_index(alldata)
columns = self._maybe_dedup_names(self.columns)
columns, data = self._do_date_conversions(columns, data)
data = self._convert_data(data)
index, columns = self._make_index(data, alldata, columns, indexnamerow)
return index, columns, data
def _exclude_implicit_index(self, alldata):
names = self._maybe_dedup_names(self.orig_names)
if self._implicit_index:
excl_indices = self.index_col
data = {}
offset = 0
for i, col in enumerate(names):
while i + offset in excl_indices:
offset += 1
data[col] = alldata[i + offset]
else:
data = {k: v for k, v in zip(names, alldata)}
return data
# legacy
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
return self.read(rows=size)
def _convert_data(self, data):
# apply converters
def _clean_mapping(mapping):
"converts col numbers to names"
clean = {}
for col, v in mapping.items():
if isinstance(col, int) and col not in self.orig_names:
col = self.orig_names[col]
clean[col] = v
return clean
clean_conv = _clean_mapping(self.converters)
if not isinstance(self.dtype, dict):
# handles single dtype applied to all columns
clean_dtypes = self.dtype
else:
clean_dtypes = _clean_mapping(self.dtype)
# Apply NA values.
clean_na_values = {}
clean_na_fvalues = {}
if isinstance(self.na_values, dict):
for col in self.na_values:
na_value = self.na_values[col]
na_fvalue = self.na_fvalues[col]
if isinstance(col, int) and col not in self.orig_names:
col = self.orig_names[col]
clean_na_values[col] = na_value
clean_na_fvalues[col] = na_fvalue
else:
clean_na_values = self.na_values
clean_na_fvalues = self.na_fvalues
return self._convert_to_ndarrays(
data,
clean_na_values,
clean_na_fvalues,
self.verbose,
clean_conv,
clean_dtypes,
)
def _infer_columns(self):
names = self.names
num_original_columns = 0
clear_buffer = True
unnamed_cols = set()
if self.header is not None:
header = self.header
if isinstance(header, (list, tuple, np.ndarray)):
have_mi_columns = len(header) > 1
                # we have MultiIndex columns, so read an extra line
if have_mi_columns:
header = list(header) + [header[-1] + 1]
else:
have_mi_columns = False
header = [header]
columns = []
for level, hr in enumerate(header):
try:
line = self._buffered_line()
while self.line_pos <= hr:
line = self._next_line()
except StopIteration:
if self.line_pos < hr:
raise ValueError(
f"Passed header={hr} but only {self.line_pos + 1} lines in "
"file"
)
# We have an empty file, so check
# if columns are provided. That will
# serve as the 'line' for parsing
if have_mi_columns and hr > 0:
if clear_buffer:
self._clear_buffer()
columns.append([None] * len(columns[-1]))
return columns, num_original_columns, unnamed_cols
if not self.names:
raise EmptyDataError("No columns to parse from file")
line = self.names[:]
this_columns = []
this_unnamed_cols = []
for i, c in enumerate(line):
if c == "":
if have_mi_columns:
col_name = f"Unnamed: {i}_level_{level}"
else:
col_name = f"Unnamed: {i}"
this_unnamed_cols.append(i)
this_columns.append(col_name)
else:
this_columns.append(c)
if not have_mi_columns and self.mangle_dupe_cols:
counts = defaultdict(int)
for i, col in enumerate(this_columns):
cur_count = counts[col]
while cur_count > 0:
counts[col] = cur_count + 1
col = f"{col}.{cur_count}"
cur_count = counts[col]
this_columns[i] = col
counts[col] = cur_count + 1
elif have_mi_columns:
                    # if we have grabbed an extra line, but it's not in our
                    # format, save it in the buffer and create a blank extra
                    # line for the rest of the parsing code
if hr == header[-1]:
lc = len(this_columns)
ic = len(self.index_col) if self.index_col is not None else 0
unnamed_count = len(this_unnamed_cols)
if lc != unnamed_count and lc - ic > unnamed_count:
clear_buffer = False
this_columns = [None] * lc
self.buf = [self.buf[-1]]
columns.append(this_columns)
unnamed_cols.update({this_columns[i] for i in this_unnamed_cols})
if len(columns) == 1:
num_original_columns = len(this_columns)
if clear_buffer:
self._clear_buffer()
if names is not None:
if (self.usecols is not None and len(names) != len(self.usecols)) or (
self.usecols is None and len(names) != len(columns[0])
):
raise ValueError(
"Number of passed names did not match "
"number of header fields in the file"
)
if len(columns) > 1:
raise TypeError("Cannot pass names with multi-index columns")
if self.usecols is not None:
# Set _use_cols. We don't store columns because they are
# overwritten.
self._handle_usecols(columns, names)
else:
self._col_indices = None
num_original_columns = len(names)
columns = [names]
else:
columns = self._handle_usecols(columns, columns[0])
else:
try:
line = self._buffered_line()
except StopIteration:
if not names:
raise EmptyDataError("No columns to parse from file")
line = names[:]
ncols = len(line)
num_original_columns = ncols
if not names:
if self.prefix:
columns = [[f"{self.prefix}{i}" for i in range(ncols)]]
else:
columns = [list(range(ncols))]
columns = self._handle_usecols(columns, columns[0])
else:
if self.usecols is None or len(names) >= num_original_columns:
columns = self._handle_usecols([names], names)
num_original_columns = len(names)
else:
if not callable(self.usecols) and len(names) != len(self.usecols):
raise ValueError(
"Number of passed names did not match number of "
"header fields in the file"
)
# Ignore output but set used columns.
self._handle_usecols([names], names)
columns = [names]
num_original_columns = ncols
return columns, num_original_columns, unnamed_cols
def _handle_usecols(self, columns, usecols_key):
"""
Sets self._col_indices
usecols_key is used if there are string usecols.
"""
if self.usecols is not None:
if callable(self.usecols):
col_indices = _evaluate_usecols(self.usecols, usecols_key)
elif any(isinstance(u, str) for u in self.usecols):
if len(columns) > 1:
raise ValueError(
"If using multiple headers, usecols must be integers."
)
col_indices = []
for col in self.usecols:
if isinstance(col, str):
try:
col_indices.append(usecols_key.index(col))
except ValueError:
_validate_usecols_names(self.usecols, usecols_key)
else:
col_indices.append(col)
else:
col_indices = self.usecols
columns = [
[n for i, n in enumerate(column) if i in col_indices]
for column in columns
]
self._col_indices = col_indices
return columns
def _buffered_line(self):
"""
Return a line from buffer, filling buffer if required.
"""
if len(self.buf) > 0:
return self.buf[0]
else:
return self._next_line()
def _check_for_bom(self, first_row):
"""
Checks whether the file begins with the BOM character.
If it does, remove it. In addition, if there is quoting
in the field subsequent to the BOM, remove it as well
because it technically takes place at the beginning of
the name, not the middle of it.
"""
# first_row will be a list, so we need to check
# that that list is not empty before proceeding.
if not first_row:
return first_row
# The first element of this row is the one that could have the
# BOM that we want to remove. Check that the first element is a
# string before proceeding.
if not isinstance(first_row[0], str):
return first_row
# Check that the string is not empty, as that would
# obviously not have a BOM at the start of it.
if not first_row[0]:
return first_row
# Since the string is non-empty, check that it does
# in fact begin with a BOM.
first_elt = first_row[0][0]
if first_elt != _BOM:
return first_row
first_row_bom = first_row[0]
if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar:
start = 2
quote = first_row_bom[1]
end = first_row_bom[2:].index(quote) + 2
# Extract the data between the quotation marks
new_row = first_row_bom[start:end]
# Extract any remaining data after the second
# quotation mark.
if len(first_row_bom) > end + 1:
new_row += first_row_bom[end + 1 :]
return [new_row] + first_row[1:]
elif len(first_row_bom) > 1:
return [first_row_bom[1:]]
else:
# First row is just the BOM, so we
# return an empty string.
return [""]
def _is_line_empty(self, line):
"""
Check if a line is empty or not.
Parameters
----------
line : str, array-like
The line of data to check.
Returns
-------
boolean : Whether or not the line is empty.
"""
return not line or all(not x for x in line)
def _next_line(self):
if isinstance(self.data, list):
while self.skipfunc(self.pos):
self.pos += 1
while True:
try:
line = self._check_comments([self.data[self.pos]])[0]
self.pos += 1
# either uncommented or blank to begin with
if not self.skip_blank_lines and (
self._is_line_empty(self.data[self.pos - 1]) or line
):
break
elif self.skip_blank_lines:
ret = self._remove_empty_lines([line])
if ret:
line = ret[0]
break
except IndexError:
raise StopIteration
else:
while self.skipfunc(self.pos):
self.pos += 1
next(self.data)
while True:
orig_line = self._next_iter_line(row_num=self.pos + 1)
self.pos += 1
if orig_line is not None:
line = self._check_comments([orig_line])[0]
if self.skip_blank_lines:
ret = self._remove_empty_lines([line])
if ret:
line = ret[0]
break
elif self._is_line_empty(orig_line) or line:
break
# This was the first line of the file,
# which could contain the BOM at the
# beginning of it.
if self.pos == 1:
line = self._check_for_bom(line)
self.line_pos += 1
self.buf.append(line)
return line
def _alert_malformed(self, msg, row_num):
"""
Alert a user about a malformed row.
If `self.error_bad_lines` is True, the alert will be `ParserError`.
If `self.warn_bad_lines` is True, the alert will be printed out.
Parameters
----------
msg : The error message to display.
row_num : The row number where the parsing error occurred.
Because this row number is displayed, we 1-index,
even though we 0-index internally.
"""
if self.error_bad_lines:
raise ParserError(msg)
elif self.warn_bad_lines:
base = f"Skipping line {row_num}: "
sys.stderr.write(base + msg + "\n")
def _next_iter_line(self, row_num):
"""
Wrapper around iterating through `self.data` (CSV source).
When a CSV error is raised, we check for specific
error messages that allow us to customize the
error message displayed to the user.
Parameters
----------
row_num : The row number of the line being parsed.
"""
try:
return next(self.data)
except csv.Error as e:
if self.warn_bad_lines or self.error_bad_lines:
msg = str(e)
if "NULL byte" in msg or "line contains NUL" in msg:
msg = (
"NULL byte detected. This byte "
"cannot be processed in Python's "
"native csv library at the moment, "
"so please pass in engine='c' instead"
)
if self.skipfooter > 0:
reason = (
"Error could possibly be due to "
"parsing errors in the skipped footer rows "
"(the skipfooter keyword is only applied "
"after Python's csv library has parsed "
"all rows)."
)
msg += ". " + reason
self._alert_malformed(msg, row_num)
return None
def _check_comments(self, lines):
if self.comment is None:
return lines
ret = []
for l in lines:
rl = []
for x in l:
if not isinstance(x, str) or self.comment not in x:
rl.append(x)
else:
x = x[: x.find(self.comment)]
if len(x) > 0:
rl.append(x)
break
ret.append(rl)
return ret
def _remove_empty_lines(self, lines):
"""
Iterate through the lines and remove any that are
either empty or contain only one whitespace value
Parameters
----------
lines : array-like
The array of lines that we are to filter.
Returns
-------
filtered_lines : array-like
The same array of lines with the "empty" ones removed.
"""
ret = []
for l in lines:
# Remove empty lines and lines with only one whitespace value
if (
len(l) > 1
or len(l) == 1
and (not isinstance(l[0], str) or l[0].strip())
):
ret.append(l)
return ret
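    # Illustrative worked example (not part of the original pandas source):
    #
    #     _remove_empty_lines([["a", "b"], [""], ["   "], ["c"]])
    #     ->  [["a", "b"], ["c"]]
    #
    # rows that are empty or hold a single all-whitespace string are dropped.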
def _check_thousands(self, lines):
if self.thousands is None:
return lines
return self._search_replace_num_columns(
lines=lines, search=self.thousands, replace=""
)
def _search_replace_num_columns(self, lines, search, replace):
ret = []
for l in lines:
rl = []
for i, x in enumerate(l):
if (
not isinstance(x, str)
or search not in x
or (self._no_thousands_columns and i in self._no_thousands_columns)
or self.nonnum.search(x.strip())
):
rl.append(x)
else:
rl.append(x.replace(search, replace))
ret.append(rl)
return ret
def _check_decimal(self, lines):
if self.decimal == _parser_defaults["decimal"]:
return lines
return self._search_replace_num_columns(
lines=lines, search=self.decimal, replace="."
)
def _clear_buffer(self):
self.buf = []
_implicit_index = False
def _get_index_name(self, columns):
"""
Try several cases to get lines:
0) There are headers on row 0 and row 1 and their
total summed lengths equals the length of the next line.
Treat row 0 as columns and row 1 as indices
1) Look for implicit index: there are more columns
on row 1 than row 0. If this is true, assume that row
1 lists index columns and row 0 lists normal columns.
2) Get index from the columns if it was listed.
"""
orig_names = list(columns)
columns = list(columns)
try:
line = self._next_line()
except StopIteration:
line = None
try:
next_line = self._next_line()
except StopIteration:
next_line = None
# implicitly index_col=0 b/c 1 fewer column names
implicit_first_cols = 0
if line is not None:
# leave it 0, #2442
# Case 1
if self.index_col is not False:
implicit_first_cols = len(line) - self.num_original_columns
# Case 0
if next_line is not None:
if len(next_line) == len(line) + self.num_original_columns:
# column and index names on diff rows
self.index_col = list(range(len(line)))
self.buf = self.buf[1:]
for c in reversed(line):
columns.insert(0, c)
# Update list of original names to include all indices.
orig_names = list(columns)
self.num_original_columns = len(columns)
return line, orig_names, columns
if implicit_first_cols > 0:
# Case 1
self._implicit_index = True
if self.index_col is None:
self.index_col = list(range(implicit_first_cols))
index_name = None
else:
# Case 2
(index_name, columns_, self.index_col) = _clean_index_names(
columns, self.index_col, self.unnamed_cols
)
return index_name, orig_names, columns
def _rows_to_cols(self, content):
col_len = self.num_original_columns
if self._implicit_index:
col_len += len(self.index_col)
max_len = max(len(row) for row in content)
# Check that there are no rows with too many
# elements in their row (rows with too few
# elements are padded with NaN).
if max_len > col_len and self.index_col is not False and self.usecols is None:
footers = self.skipfooter if self.skipfooter else 0
bad_lines = []
iter_content = enumerate(content)
content_len = len(content)
content = []
for (i, l) in iter_content:
actual_len = len(l)
if actual_len > col_len:
if self.error_bad_lines or self.warn_bad_lines:
row_num = self.pos - (content_len - i + footers)
bad_lines.append((row_num, actual_len))
if self.error_bad_lines:
break
else:
content.append(l)
for row_num, actual_len in bad_lines:
msg = (
f"Expected {col_len} fields in line {row_num + 1}, saw "
f"{actual_len}"
)
if (
self.delimiter
and len(self.delimiter) > 1
and self.quoting != csv.QUOTE_NONE
):
# see gh-13374
reason = (
"Error could possibly be due to quotes being "
"ignored when a multi-char delimiter is used."
)
msg += ". " + reason
self._alert_malformed(msg, row_num + 1)
# see gh-13320
zipped_content = list(lib.to_object_array(content, min_width=col_len).T)
if self.usecols:
if self._implicit_index:
zipped_content = [
a
for i, a in enumerate(zipped_content)
if (
i < len(self.index_col)
or i - len(self.index_col) in self._col_indices
)
]
else:
zipped_content = [
a for i, a in enumerate(zipped_content) if i in self._col_indices
]
return zipped_content
def _get_lines(self, rows=None):
lines = self.buf
new_rows = None
# already fetched some number
if rows is not None:
# we already have the lines in the buffer
if len(self.buf) >= rows:
new_rows, self.buf = self.buf[:rows], self.buf[rows:]
# need some lines
else:
rows -= len(self.buf)
if new_rows is None:
if isinstance(self.data, list):
if self.pos > len(self.data):
raise StopIteration
if rows is None:
new_rows = self.data[self.pos :]
new_pos = len(self.data)
else:
new_rows = self.data[self.pos : self.pos + rows]
new_pos = self.pos + rows
# Check for stop rows. n.b.: self.skiprows is a set.
if self.skiprows:
new_rows = [
row
for i, row in enumerate(new_rows)
if not self.skipfunc(i + self.pos)
]
lines.extend(new_rows)
self.pos = new_pos
else:
new_rows = []
try:
if rows is not None:
for _ in range(rows):
new_rows.append(next(self.data))
lines.extend(new_rows)
else:
rows = 0
while True:
new_row = self._next_iter_line(row_num=self.pos + rows + 1)
rows += 1
if new_row is not None:
new_rows.append(new_row)
except StopIteration:
if self.skiprows:
new_rows = [
row
for i, row in enumerate(new_rows)
if not self.skipfunc(i + self.pos)
]
lines.extend(new_rows)
if len(lines) == 0:
raise
self.pos += len(new_rows)
self.buf = []
else:
lines = new_rows
if self.skipfooter:
lines = lines[: -self.skipfooter]
lines = self._check_comments(lines)
if self.skip_blank_lines:
lines = self._remove_empty_lines(lines)
lines = self._check_thousands(lines)
return self._check_decimal(lines)
def _make_date_converter(
date_parser=None, dayfirst=False, infer_datetime_format=False, cache_dates=True
):
def converter(*date_cols):
if date_parser is None:
strs = parsing._concat_date_cols(date_cols)
try:
return tools.to_datetime(
ensure_object(strs),
utc=None,
dayfirst=dayfirst,
errors="ignore",
infer_datetime_format=infer_datetime_format,
cache=cache_dates,
).to_numpy()
except ValueError:
return tools.to_datetime(
parsing.try_parse_dates(strs, dayfirst=dayfirst), cache=cache_dates
)
else:
try:
result = tools.to_datetime(
date_parser(*date_cols), errors="ignore", cache=cache_dates
)
if isinstance(result, datetime.datetime):
raise Exception("scalar parser")
return result
except Exception:
try:
return tools.to_datetime(
parsing.try_parse_dates(
parsing._concat_date_cols(date_cols),
parser=date_parser,
dayfirst=dayfirst,
),
errors="ignore",
)
except Exception:
return generic_parser(date_parser, *date_cols)
return converter
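# Illustrative sketch (not part of the original pandas source): with no
# date_parser given, the returned converter joins the passed columns
# element-wise and parses the result with to_datetime. The helper name and
# sample arrays below are hypothetical.
def _example_default_date_converter():
    converter = _make_date_converter()
    dates = np.array(["2020-01-01", "2020-01-02"], dtype=object)
    times = np.array(["10:00", "11:30"], dtype=object)
    # parsed as "2020-01-01 10:00" and "2020-01-02 11:30"
    return converter(dates, times)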
def _process_date_conversion(
data_dict,
converter,
parse_spec,
index_col,
index_names,
columns,
keep_date_col=False,
):
def _isindex(colspec):
return (isinstance(index_col, list) and colspec in index_col) or (
isinstance(index_names, list) and colspec in index_names
)
new_cols = []
new_data = {}
orig_names = columns
columns = list(columns)
date_cols = set()
if parse_spec is None or isinstance(parse_spec, bool):
return data_dict, columns
if isinstance(parse_spec, list):
# list of column lists
for colspec in parse_spec:
if is_scalar(colspec):
if isinstance(colspec, int) and colspec not in data_dict:
colspec = orig_names[colspec]
if _isindex(colspec):
continue
data_dict[colspec] = converter(data_dict[colspec])
else:
new_name, col, old_names = _try_convert_dates(
converter, colspec, data_dict, orig_names
)
if new_name in data_dict:
raise ValueError(f"New date column already in dict {new_name}")
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
elif isinstance(parse_spec, dict):
# dict of new name to column list
for new_name, colspec in parse_spec.items():
if new_name in data_dict:
raise ValueError(f"Date column {new_name} already in dict")
_, col, old_names = _try_convert_dates(
converter, colspec, data_dict, orig_names
)
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
data_dict.update(new_data)
new_cols.extend(columns)
if not keep_date_col:
for c in list(date_cols):
data_dict.pop(c)
new_cols.remove(c)
return data_dict, new_cols
def _try_convert_dates(parser, colspec, data_dict, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int) and c not in columns:
colnames.append(columns[c])
else:
colnames.append(c)
new_name = "_".join(str(x) for x in colnames)
to_parse = [data_dict[c] for c in colnames if c in data_dict]
new_col = parser(*to_parse)
return new_name, new_col, colnames
def _clean_na_values(na_values, keep_default_na=True):
if na_values is None:
if keep_default_na:
na_values = STR_NA_VALUES
else:
na_values = set()
na_fvalues = set()
elif isinstance(na_values, dict):
old_na_values = na_values.copy()
na_values = {} # Prevent aliasing.
# Convert the values in the na_values dictionary
# into array-likes for further use. This is also
# where we append the default NaN values, provided
# that `keep_default_na=True`.
for k, v in old_na_values.items():
if not is_list_like(v):
v = [v]
if keep_default_na:
v = set(v) | STR_NA_VALUES
na_values[k] = v
na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()}
else:
if not is_list_like(na_values):
na_values = [na_values]
na_values = _stringify_na_values(na_values)
if keep_default_na:
na_values = na_values | STR_NA_VALUES
na_fvalues = _floatify_na_values(na_values)
return na_values, na_fvalues
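# Illustrative sketch (not part of the original pandas source; the helper name
# is hypothetical): per-column NA markers passed as a dict are turned into
# sets and, with keep_default_na=True, unioned with the default string NA
# values.
def _example_clean_na_values():
    na_values, na_fvalues = _clean_na_values({"col_a": ["missing"]})
    # na_values["col_a"] contains "missing" plus STR_NA_VALUES;
    # na_fvalues["col_a"] holds the float-convertible subset.
    return na_values, na_fvalues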
def _clean_index_names(columns, index_col, unnamed_cols):
if not _is_index_col(index_col):
return None, columns, index_col
columns = list(columns)
cp_cols = list(columns)
index_names = []
# don't mutate
index_col = list(index_col)
for i, c in enumerate(index_col):
if isinstance(c, str):
index_names.append(c)
for j, name in enumerate(cp_cols):
if name == c:
index_col[i] = j
columns.remove(name)
break
else:
name = cp_cols[c]
columns.remove(name)
index_names.append(name)
# Only clean index names that were placeholders.
for i, name in enumerate(index_names):
if isinstance(name, str) and name in unnamed_cols:
index_names[i] = None
return index_names, columns, index_col
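# Illustrative worked example (not part of the original pandas source; the
# helper name is hypothetical):
def _example_clean_index_names():
    # A string index_col is resolved to its position and removed from the
    # remaining columns; names that came from "Unnamed: ..." placeholders
    # (tracked in unnamed_cols) would be reset to None.
    return _clean_index_names(["date", "a", "b"], ["date"], unnamed_cols=set())
    # -> (["date"], ["a", "b"], [0])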
def _get_empty_meta(columns, index_col, index_names, dtype=None):
columns = list(columns)
# Convert `dtype` to a defaultdict of some kind.
# This will enable us to write `dtype[col_name]`
# without worrying about KeyError issues later on.
if not isinstance(dtype, dict):
# if dtype == None, default will be np.object.
default_dtype = dtype or np.object
dtype = defaultdict(lambda: default_dtype)
else:
# Save a copy of the dictionary.
_dtype = dtype.copy()
dtype = defaultdict(lambda: np.object)
# Convert column indexes to column names.
for k, v in _dtype.items():
col = columns[k] if is_integer(k) else k
dtype[col] = v
# Even though we have no data, the "index" of the empty DataFrame
# could for example still be an empty MultiIndex. Thus, we need to
# check whether we have any index columns specified, via either:
#
# 1) index_col (column indices)
# 2) index_names (column names)
#
# Both must be non-null to ensure a successful construction. Otherwise,
# we have to create a generic empty Index.
if (index_col is None or index_col is False) or index_names is None:
index = Index([])
else:
data = [Series([], dtype=dtype[name]) for name in index_names]
index = ensure_index_from_sequences(data, names=index_names)
index_col.sort()
for i, n in enumerate(index_col):
columns.pop(n - i)
col_dict = {col_name: Series([], dtype=dtype[col_name]) for col_name in columns}
return index, columns, col_dict
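# Illustrative sketch (not part of the original pandas source; the helper name
# is hypothetical): with no index columns, an empty Index plus one empty
# object-dtype Series per column are produced.
def _example_get_empty_meta():
    index, columns, col_dict = _get_empty_meta(["a", "b"], None, None)
    return index, columns, col_dict  # Index([]), ["a", "b"], dict of empty Series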
def _floatify_na_values(na_values):
# create float versions of the na_values
result = set()
for v in na_values:
try:
v = float(v)
if not np.isnan(v):
result.add(v)
except (TypeError, ValueError, OverflowError):
pass
return result
def _stringify_na_values(na_values):
""" return a stringified and numeric for these values """
result = []
for x in na_values:
result.append(str(x))
result.append(x)
try:
v = float(x)
            # integer-like value, e.g. 999
if v == int(v):
v = int(v)
result.append(f"{v}.0")
result.append(str(v))
result.append(v)
except (TypeError, ValueError, OverflowError):
pass
try:
result.append(int(x))
except (TypeError, ValueError, OverflowError):
pass
return set(result)
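# Illustrative worked example (not part of the original pandas source; the
# helper name is hypothetical): integer-like markers are expanded into string,
# float-string and numeric variants so every representation matches while
# parsing.
def _example_stringify_na_values():
    return _stringify_na_values(["-999"])  # -> {"-999", "-999.0", -999}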
def _get_na_values(col, na_values, na_fvalues, keep_default_na):
"""
Get the NaN values for a given column.
Parameters
----------
col : str
The name of the column.
na_values : array-like, dict
The object listing the NaN values as strings.
na_fvalues : array-like, dict
The object listing the NaN values as floats.
keep_default_na : bool
If `na_values` is a dict, and the column is not mapped in the
dictionary, whether to return the default NaN values or the empty set.
Returns
-------
nan_tuple : A length-two tuple composed of
1) na_values : the string NaN values for that column.
2) na_fvalues : the float NaN values for that column.
"""
if isinstance(na_values, dict):
if col in na_values:
return na_values[col], na_fvalues[col]
else:
if keep_default_na:
return STR_NA_VALUES, set()
return set(), set()
else:
return na_values, na_fvalues
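# Illustrative worked example (not part of the original pandas source; the
# helper name is hypothetical): with dict-style na_values, columns that are
# not mapped fall back to the defaults (or to empty sets when
# keep_default_na=False).
def _example_get_na_values():
    return _get_na_values("col_b", {"col_a": {"missing"}}, {"col_a": set()}, True)
    # -> (STR_NA_VALUES, set())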
def _get_col_names(colspec, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int):
colnames.append(columns[c])
return colnames
class FixedWidthReader(abc.Iterator):
"""
A reader of fixed-width lines.
"""
def __init__(self, f, colspecs, delimiter, comment, skiprows=None, infer_nrows=100):
self.f = f
self.buffer = None
self.delimiter = "\r\n" + delimiter if delimiter else "\n\r\t "
self.comment = comment
if colspecs == "infer":
self.colspecs = self.detect_colspecs(
infer_nrows=infer_nrows, skiprows=skiprows
)
else:
self.colspecs = colspecs
if not isinstance(self.colspecs, (tuple, list)):
raise TypeError(
"column specifications must be a list or tuple, "
f"input was a {type(colspecs).__name__}"
)
for colspec in self.colspecs:
if not (
isinstance(colspec, (tuple, list))
and len(colspec) == 2
and isinstance(colspec[0], (int, np.integer, type(None)))
and isinstance(colspec[1], (int, np.integer, type(None)))
):
raise TypeError(
"Each column specification must be "
"2 element tuple or list of integers"
)
def get_rows(self, infer_nrows, skiprows=None):
"""
Read rows from self.f, skipping as specified.
We distinguish buffer_rows (the first <= infer_nrows
lines) from the rows returned to detect_colspecs
because it's simpler to leave the other locations
with skiprows logic alone than to modify them to
deal with the fact we skipped some rows here as
well.
Parameters
----------
infer_nrows : int
Number of rows to read from self.f, not counting
rows that are skipped.
skiprows: set, optional
Indices of rows to skip.
Returns
-------
detect_rows : list of str
A list containing the rows to read.
"""
if skiprows is None:
skiprows = set()
buffer_rows = []
detect_rows = []
for i, row in enumerate(self.f):
if i not in skiprows:
detect_rows.append(row)
buffer_rows.append(row)
if len(detect_rows) >= infer_nrows:
break
self.buffer = iter(buffer_rows)
return detect_rows
def detect_colspecs(self, infer_nrows=100, skiprows=None):
# Regex escape the delimiters
delimiters = "".join(r"\{}".format(x) for x in self.delimiter)
pattern = re.compile("([^{}]+)".format(delimiters))
rows = self.get_rows(infer_nrows, skiprows)
if not rows:
raise EmptyDataError("No rows from which to infer column width")
max_len = max(map(len, rows))
mask = np.zeros(max_len + 1, dtype=int)
if self.comment is not None:
rows = [row.partition(self.comment)[0] for row in rows]
for row in rows:
for m in pattern.finditer(row):
mask[m.start() : m.end()] = 1
shifted = np.roll(mask, 1)
shifted[0] = 0
edges = np.where((mask ^ shifted) == 1)[0]
edge_pairs = list(zip(edges[::2], edges[1::2]))
return edge_pairs
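    # Illustrative worked example (not part of the original pandas source):
    # for rows like "aaa  bb" the runs of non-delimiter characters yield the
    # edge pairs [(0, 3), (5, 7)], i.e. one half-open (start, end) interval
    # per detected fixed-width column.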
def __next__(self):
if self.buffer is not None:
try:
line = next(self.buffer)
except StopIteration:
self.buffer = None
line = next(self.f)
else:
line = next(self.f)
# Note: 'colspecs' is a sequence of half-open intervals.
return [line[fromm:to].strip(self.delimiter) for (fromm, to) in self.colspecs]
class FixedWidthFieldParser(PythonParser):
"""
    Specialization that converts fixed-width fields into DataFrames.
See PythonParser for details.
"""
def __init__(self, f, **kwds):
# Support iterators, convert to a list.
self.colspecs = kwds.pop("colspecs")
self.infer_nrows = kwds.pop("infer_nrows")
PythonParser.__init__(self, f, **kwds)
def _make_reader(self, f):
self.data = FixedWidthReader(
f,
self.colspecs,
self.delimiter,
self.comment,
self.skiprows,
self.infer_nrows,
)
| python | 126,513 |
from __future__ import absolute_import
from six.moves import range
__author__ = 'noe'
import numpy as _np
from pyemma.util.types import ensure_dtraj_list
from pyemma.msm.estimators.maximum_likelihood_hmsm import MaximumLikelihoodHMSM as _MaximumLikelihoodHMSM
from pyemma.msm.models.hmsm import HMSM as _HMSM
from pyemma.msm.estimators.estimated_hmsm import EstimatedHMSM as _EstimatedHMSM
from pyemma.msm.models.hmsm_sampled import SampledHMSM as _SampledHMSM
from pyemma.util.units import TimeUnit
from pyemma._base.progress import ProgressReporter
class BayesianHMSM(_MaximumLikelihoodHMSM, _SampledHMSM, ProgressReporter):
"""Estimator for a Bayesian HMSM
"""
def __init__(self, nstates=2, lag=1, stride='effective', prior='mixed', nsamples=100, init_hmsm=None,
reversible=True, connectivity='largest', observe_active=True, dt_traj='1 step', conf=0.95):
"""
Parameters
----------
nstates : int, optional, default=2
number of hidden states
lag : int, optional, default=1
lagtime to estimate the HMSM at
        stride : str or int, default='effective'
stride between two lagged trajectories extracted from the input
trajectories. Given trajectory s[t], stride and lag will result
in trajectories
s[0], s[tau], s[2 tau], ...
s[stride], s[stride + tau], s[stride + 2 tau], ...
Setting stride = 1 will result in using all data (useful for
maximum likelihood estimator), while a Bayesian estimator requires
a longer stride in order to have statistically uncorrelated
            trajectories. Setting stride = 'effective' uses the largest
neglected timescale as an estimate for the correlation time and
sets the stride accordingly.
prior : str, optional, default='mixed'
prior used in the estimation of the transition matrix. While 'sparse'
            would be preferred as it doesn't bias the distribution away from the
maximum-likelihood, this prior is sensitive to loss of connectivity.
Loss of connectivity can occur in the Gibbs sampling algorithm used
here because in each iteration the hidden state sequence is randomly
generated. Once full connectivity is lost in one of these steps, the
current algorithm cannot recover from that. As a solution we suggest
using a prior that ensures that the estimated transition matrix is
connected even if the sampled state sequence is not.
* 'sparse' : the sparse prior proposed in [1]_ which centers the
posterior around the maximum likelihood estimator. This is the
preferred option if there are no connectivity problems. However
this prior is sensitive to loss of connectivity.
* 'uniform' : uniform prior probability for every transition matrix
element. Compared to the sparse prior, 'uniform' adds +1 to
every transition count. Weak prior that ensures connectivity,
but can lead to large biases if some states have small exit
probabilities.
* 'mixed' : ensures connectivity by adding a prior taken from the
maximum likelihood estimate (MLE) of the hidden transition
matrix P. The rows of P are scaled in order to have total
outgoing transition counts of at least 1 out of each state.
While this operation centers the posterior around the MLE, it
can be a very strong prior if states with small exit
probabilities are involved, and can therefore artificially
reduce the error bars.
init_hmsm : :class:`HMSM <pyemma.msm.ui.hmsm.HMSM>`
Single-point estimate of HMSM object around which errors will be evaluated
observe_active : bool, optional, default=True
True: Restricts the observation set to the active states of the MSM.
False: All states are in the observation set.
References
----------
[1] Trendelkamp-Schroer, B., H. Wu, F. Paul and F. Noe: Estimation and
uncertainty of reversible Markov models. J. Chem. Phys. (in review)
Preprint: http://arxiv.org/abs/1507.05990
"""
self.lag = lag
self.stride = stride
self.nstates = nstates
self.prior = prior
self.nsamples = nsamples
self.init_hmsm = init_hmsm
self.reversible = reversible
self.connectivity = connectivity
self.observe_active = observe_active
self.dt_traj = dt_traj
self.timestep_traj = TimeUnit(dt_traj)
self.conf = conf
def _estimate(self, dtrajs):
"""
        Returns
        -------
hmsm : :class:`EstimatedHMSM <pyemma.msm.ui.hmsm_estimated.EstimatedHMSM>`
Estimated Hidden Markov state model
"""
# ensure right format
dtrajs = ensure_dtraj_list(dtrajs)
# if no initial MSM is given, estimate it now
if self.init_hmsm is None:
# estimate with store_data=True, because we need an EstimatedHMSM
hmsm_estimator = _MaximumLikelihoodHMSM(lag=self.lag, stride=self.stride, nstates=self.nstates,
reversible=self.reversible, connectivity=self.connectivity,
observe_active=self.observe_active, dt_traj=self.dt_traj)
init_hmsm = hmsm_estimator.estimate(dtrajs) # estimate with lagged trajectories
else:
# check input
assert isinstance(self.init_hmsm, _EstimatedHMSM), 'hmsm must be of type EstimatedHMSM'
init_hmsm = self.init_hmsm
self.nstates = init_hmsm.nstates
self.reversible = init_hmsm.is_reversible
# here we blow up the output matrix (if needed) to the FULL state space because we want to use dtrajs in the
# Bayesian HMM sampler
if self.observe_active:
import msmtools.estimation as msmest
nstates_full = msmest.number_of_states(dtrajs)
# pobs = _np.zeros((init_hmsm.nstates, nstates_full)) # currently unused because that produces zero cols
eps = 0.01 / nstates_full # default output probability, in order to avoid zero columns
# full state space output matrix. make sure there are no zero columns
pobs = eps * _np.ones((self.nstates, nstates_full), dtype=_np.float64)
# fill active states
pobs[:, init_hmsm.observable_set] = _np.maximum(eps, init_hmsm.observation_probabilities)
# renormalize B to make it row-stochastic
pobs /= pobs.sum(axis=1)[:, None]
else:
pobs = init_hmsm.observation_probabilities
# HMM sampler
self._progress_register(self.nsamples, description='Sampling models', stage=0)
def call_back():
self._progress_update(1, stage=0)
from bhmm import discrete_hmm, bayesian_hmm
hmm_mle = discrete_hmm(init_hmsm.transition_matrix, pobs, stationary=True, reversible=self.reversible)
# define prior
if self.prior == 'sparse':
self.prior_count_matrix = _np.zeros((self.nstates, self.nstates), dtype=_np.float64)
elif self.prior == 'uniform':
self.prior_count_matrix = _np.ones((self.nstates, self.nstates), dtype=_np.float64)
elif self.prior == 'mixed':
# C0 = _np.dot(_np.diag(init_hmsm.stationary_distribution), init_hmsm.transition_matrix)
P0 = init_hmsm.transition_matrix
P0_offdiag = P0 - _np.diag(_np.diag(P0))
scaling_factor = 1.0 / _np.sum(P0_offdiag, axis=1)
self.prior_count_matrix = P0 * scaling_factor[:, None]
else:
raise ValueError('Unknown prior mode: '+self.prior)
sampled_hmm = bayesian_hmm(init_hmsm.discrete_trajectories_lagged, hmm_mle, nsample=self.nsamples,
transition_matrix_prior=self.prior_count_matrix, call_back=call_back)
# Samples
sample_Ps = [sampled_hmm.sampled_hmms[i].transition_matrix for i in range(self.nsamples)]
sample_pis = [sampled_hmm.sampled_hmms[i].stationary_distribution for i in range(self.nsamples)]
sample_pobs = [sampled_hmm.sampled_hmms[i].output_model.output_probabilities for i in range(self.nsamples)]
samples = []
for i in range(self.nsamples): # restrict to observable set if necessary
Bobs = sample_pobs[i][:, init_hmsm.observable_set]
sample_pobs[i] = Bobs / Bobs.sum(axis=1)[:, None] # renormalize
samples.append(_HMSM(sample_Ps[i], sample_pobs[i], pi=sample_pis[i], dt_model=init_hmsm.dt_model))
# parametrize self
self._dtrajs_full = dtrajs
self._observable_set = init_hmsm._observable_set
self._dtrajs_obs = init_hmsm._dtrajs_obs
self.set_model_params(samples=samples, P=init_hmsm.transition_matrix, pobs=init_hmsm.observation_probabilities,
dt_model=init_hmsm.dt_model)
return self
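# Illustrative usage sketch (not part of the original pyemma module); the
# helper name and parameter values are hypothetical, and `dtrajs` stands for a
# list of discrete trajectories as described in the class docstring.
def _example_bayesian_hmsm(dtrajs):
    estimator = BayesianHMSM(nstates=3, lag=10, nsamples=100, prior='mixed')
    # estimate() comes from the estimator base class, as already used for the
    # maximum-likelihood estimator inside _estimate() above.
    return estimator.estimate(dtrajs)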
| python | 9,309 |
import activitylogs
from events.registry import build_job
activitylogs.subscribe(build_job.BuildJobStartedTriggeredEvent)
activitylogs.subscribe(build_job.BuildJobSoppedTriggeredEvent)
activitylogs.subscribe(build_job.BuildJobDeletedTriggeredEvent)
activitylogs.subscribe(build_job.BuildJobCreatedEvent)
activitylogs.subscribe(build_job.BuildJobUpdatedEvent)
activitylogs.subscribe(build_job.BuildJobViewedEvent)
activitylogs.subscribe(build_job.BuildJobArchivedEvent)
activitylogs.subscribe(build_job.BuildJobRestoredEvent)
activitylogs.subscribe(build_job.BuildJobBookmarkedEvent)
activitylogs.subscribe(build_job.BuildJobUnBookmarkedEvent)
activitylogs.subscribe(build_job.BuildJobLogsViewedEvent)
activitylogs.subscribe(build_job.BuildJobStatusesViewedEvent)
| python | 765 |
import logging
import pytest
from ocs_ci.framework.testlib import (
E2ETest,
skipif_ocs_version,
on_prem_platform_required,
scale,
)
from ocs_ci.ocs import constants
from ocs_ci.ocs import hsbench
logger = logging.getLogger(__name__)
@pytest.fixture(scope="function")
def s3bench(request):
# Create hs s3 benchmark
s3bench = hsbench.HsBench()
s3bench.create_resource_hsbench()
s3bench.install_hsbench()
def teardown():
s3bench.cleanup()
request.addfinalizer(teardown)
return s3bench
@scale
class TestScaleNamespace(E2ETest):
"""
Test creation of a namespace scale resource
"""
@skipif_ocs_version("<4.7")
@pytest.mark.parametrize(
argnames=["bucketclass_dict"],
argvalues=[
pytest.param(
{
"interface": "OC",
"namespace_policy_dict": {
"type": "Single",
"namespacestore_dict": {"aws": [(1, None)]},
},
},
marks=[pytest.mark.polarion_id("OCS-2518")],
),
pytest.param(
{
"interface": "OC",
"namespace_policy_dict": {
"type": "Single",
"namespacestore_dict": {"azure": [(1, None)]},
},
},
marks=[pytest.mark.polarion_id("OCS-2558")],
),
pytest.param(
{
"interface": "OC",
"namespace_policy_dict": {
"type": "Single",
"namespacestore_dict": {"rgw": [(1, None)]},
},
},
marks=[
on_prem_platform_required,
pytest.mark.polarion_id("OCS-2559"),
],
),
pytest.param(
{
"interface": "OC",
"namespace_policy_dict": {
"type": "Cache",
"ttl": 60000,
"namespacestore_dict": {"aws": [(1, "eu-central-1")]},
},
"placement_policy": {
"tiers": [
{"backingStores": [constants.DEFAULT_NOOBAA_BACKINGSTORE]}
]
},
},
marks=[pytest.mark.polarion_id("OCS-2560")],
),
pytest.param(
{
"interface": "OC",
"namespace_policy_dict": {
"type": "Multi",
"namespacestore_dict": {
"aws": [(2, "us-east-2")],
},
},
},
marks=[pytest.mark.polarion_id("OCS-2743")],
),
pytest.param(
{
"interface": "OC",
"namespace_policy_dict": {
"type": "Multi",
"namespacestore_dict": {
"rgw": [(2, None)],
},
},
},
marks=[
on_prem_platform_required,
pytest.mark.polarion_id("OCS-2744"),
],
),
],
ids=[
"Scale-AWS-Single",
"Scale-Azure-Single",
"Scale-RGW-Single",
"Scale-AWS-Cache",
"Scale-AWS-AWS-Multi",
"Scale-RWG-RGW-Multi",
],
)
def test_scale_namespace_bucket_creation_crd(
self,
mcg_obj,
bucket_factory,
bucketclass_dict,
s3bench,
):
"""
Test namespace bucket creation using the MCG CRDs.
        Create 50 namespace resources.
        For each namespace resource, create a namespace bucket and start an hsbench benchmark.
"""
num_s3_obj = 1000
ns_bucket_list = []
for _ in range(50):
ns_bucket_list.append(
bucket_factory(
amount=1,
interface=bucketclass_dict["interface"],
bucketclass=bucketclass_dict,
)[0]
)
for _ in ns_bucket_list:
s3bench.run_benchmark(
num_obj=num_s3_obj,
timeout=7200,
access_key=mcg_obj.access_key_id,
secret_key=mcg_obj.access_key,
end_point="http://s3.openshift-storage.svc/",
run_mode="ipg",
)
| python | 4,746 |
import os
import numpy as np
import keras.backend as K
from keras import metrics
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
from keras.layers import Input, MaxPool2D, Activation, BatchNormalization, UpSampling2D, concatenate, LeakyReLU, Conv2D
import os, sys, inspect
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))))
from dataset import *
from utils import *
EPOCHS = 500
BATCH_SIZE = 128
LEARNING_RATE = 0.0001
INPUT_SHAPE = (32, 32, 1)
WEIGHTS = 'model2.hdf5'
MODE = 1 # 1: train - 2: test
data = load_cifar10_data()
np.random.shuffle(data)
Y_channel = data[:, 0, :].reshape(50000, 32, 32, 1)
UV_channel = data[:, 1:, :].reshape(50000, 32, 32, 2)
def exact_acc(y_true, y_pred):
return K.mean(K.equal(K.round(y_true), K.round(y_pred)))
def create_conv(filters, kernel_size, inputs, name=None, bn=True, padding='same', activation='relu'):
conv = Conv2D(filters, kernel_size, padding=padding, kernel_initializer='he_normal', name=name)(inputs)
if bn == True:
conv = BatchNormalization()(conv)
if activation == 'relu':
conv = Activation(activation)(conv)
elif activation == 'leakyrelu':
conv = LeakyReLU()(conv)
return conv
def create_model():
inputs = Input(INPUT_SHAPE)
conv1 = create_conv(64, (3, 3), inputs, 'conv1_1', activation='leakyrelu')
conv1 = create_conv(64, (3, 3), conv1, 'conv1_2', activation='leakyrelu')
pool1 = MaxPool2D((2, 2))(conv1)
conv2 = create_conv(128, (3, 3), pool1, 'conv2_1', activation='leakyrelu')
conv2 = create_conv(128, (3, 3), conv2, 'conv2_2', activation='leakyrelu')
pool2 = MaxPool2D((2, 2))(conv2)
conv3 = create_conv(256, (3, 3), pool2, 'conv3_1', activation='leakyrelu')
conv3 = create_conv(256, (3, 3), conv3, 'conv3_2', activation='leakyrelu')
pool3 = MaxPool2D((2, 2))(conv3)
conv4 = create_conv(512, (3, 3), pool3, 'conv4_1', activation='leakyrelu')
conv4 = create_conv(512, (3, 3), conv4, 'conv4_2', activation='leakyrelu')
pool4 = MaxPool2D((2, 2))(conv4)
conv5 = create_conv(1024, (3, 3), pool4, 'conv5_1', activation='leakyrelu')
conv5 = create_conv(1024, (3, 3), conv5, 'conv5_2', activation='leakyrelu')
up6 = create_conv(512, (2, 2), UpSampling2D((2, 2))(conv5), 'up6', activation='relu')
merge6 = concatenate([conv4, up6], axis=3)
conv6 = create_conv(512, (3, 3), merge6, 'conv6_1', activation='relu')
conv6 = create_conv(512, (3, 3), conv6, 'conv6_2', activation='relu')
up7 = create_conv(256, (2, 2), UpSampling2D((2, 2))(conv6), 'up7', activation='relu')
merge7 = concatenate([conv3, up7], axis=3)
conv7 = create_conv(256, (3, 3), merge7, 'conv7_1', activation='relu')
conv7 = create_conv(256, (3, 3), conv7, 'conv7_2', activation='relu')
up8 = create_conv(128, (2, 2), UpSampling2D((2, 2))(conv7), 'up8', activation='relu')
merge8 = concatenate([conv2, up8], axis=3)
conv8 = create_conv(128, (3, 3), merge8, 'conv8_1', activation='relu')
conv8 = create_conv(128, (3, 3), conv8, 'conv8_2', activation='relu')
up9 = create_conv(64, (2, 2), UpSampling2D((2, 2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = create_conv(64, (3, 3), merge9, 'conv9_1', activation='relu')
conv9 = create_conv(64, (3, 3), conv9, 'conv9_2', activation='relu')
conv9 = Conv2D(2, (1, 1), padding='same', name='conv9_3')(conv9)
model = Model(inputs=inputs, outputs=conv9)
model.compile(optimizer=Adam(LEARNING_RATE),
loss='mean_squared_error',
metrics=['accuracy', exact_acc, metrics.mse, metrics.mae])
return model
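# Illustrative note (not part of the original script): the model maps a batch
# of Y (luma) planes of shape (N, 32, 32, 1) to predicted UV (chroma) planes
# of shape (N, 32, 32, 2), e.g.
#
#     uv_pred = model.predict(Y_channel[:1])   # -> shape (1, 32, 32, 2)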
model = create_model()
if os.path.exists(WEIGHTS):
model.load_weights(WEIGHTS)
if MODE == 1:
model_checkpoint = ModelCheckpoint(
filepath=WEIGHTS,
monitor='loss',
verbose=1,
save_best_only=True)
reduce_lr = ReduceLROnPlateau(
monitor='loss',
factor=0.5,
patience=10)
model.fit(
Y_channel,
UV_channel,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
verbose=1,
validation_split=0.1,
callbacks=[model_checkpoint, reduce_lr])
elif MODE == 2:
for i in range(45000, 50000):
y = Y_channel[i].T
yuv_original = np.r_[(y, UV_channel[i].T[:1], UV_channel[i].T[1:])]
uv_pred = np.array(model.predict(Y_channel[i][None, :, :, :]))[0]
yuv_pred = np.r_[(y, uv_pred.T[:1], uv_pred.T[1:])]
show_yuv(yuv_original, yuv_pred)
| python | 4,651 |
from tkinter import *
expression=""
def press(num):
global expression
expression = expression + str(num)
equation.set(expression)
def equalpress():
try:
global expression
total = str(eval(expression))
equation.set(total)
expression = ""
except:
equation.set("Error")
expression=""
def clear():
global expression
expression = ""
equation.set("")
if __name__ == "__main__":
root = Tk()
root.geometry("300x185+500+300")
root.resizable(False,False)
root.title("Simple Calculator")
equation = StringVar()
expression_field = Entry(root, textvariable=equation)
expression_field.grid(columnspan=6, ipadx=90)
btn1=Button(root,text="1",fg="white", bg="black",
command=lambda: press(1),height=2, width=9)
btn1.grid(row=2,column=0)
btn2=Button(root,text="2",fg="white", bg="black",
command=lambda: press(2), height=2, width=9)
btn2.grid(row=2,column=1)
btn3=Button(root,text="3",fg="white", bg="black",
command=lambda: press(3), height=2, width=9)
btn3.grid(row=2,column=2)
btn4=Button(root,text="4",fg="white", bg="black",
command=lambda: press(4), height=2, width=9)
btn4.grid(row=3,column=0)
btn5=Button(root,text="5",fg="white", bg="black",
command=lambda: press(5), height=2, width=9)
btn5.grid(row=3,column=1)
btn6=Button(root,text="6",fg="white", bg="black",
command=lambda: press(6), height=2, width=9)
btn6.grid(row=3,column=2)
btn7=Button(root,text="7",fg="white", bg="black",
command=lambda: press(7), height=2, width=9)
btn7.grid(row=4,column=0)
btn8=Button(root,text="8",fg="white", bg="black",
command=lambda: press(8), height=2, width=9)
btn8.grid(row=4,column=1)
btn9=Button(root,text="9",fg="white", bg="black",
command=lambda: press(9), height=2, width=9)
btn9.grid(row=4,column=2)
btn0=Button(root,text="0",fg="white", bg="black",
height=2, width=9)
btn0.grid(row=5,column=0)
btnAdd=Button(root,text="+",fg="white", bg="black",
command=lambda: press("+"), height=2, width=9)
btnAdd.grid(row=2,column=3)
btnSub=Button(root,text="-",fg="white", bg="black",
command=lambda: press("-"),height=2, width=9)
btnSub.grid(row=3,column=3)
btnMul=Button(root,text="*",fg="white", bg="black",
command=lambda: press("*"), height=2, width=9)
btnMul.grid(row=4,column=3)
btnDiv=Button(root,text="/",fg="white", bg="black",
command=lambda: press("/"), height=2, width=9)
btnDiv.grid(row=5,column=3)
btnClear=Button(root,text="AC",fg="white", bg="black",
command=clear, height=2, width=9)
btnClear.grid(row=5,column=1)
btnEqual=Button(root,text="=",fg="white", bg="black",
command=equalpress, height=2, width=9)
btnEqual.grid(row=5,column=2)
root.mainloop() | python | 2,730 |
import abc
import os
from subprocess import call, CalledProcessError
import attr
import six
from pathlib2 import Path
from ....config.defs import (
VCS_REPO_TYPE,
VCS_DIFF,
VCS_STATUS,
VCS_ROOT,
VCS_BRANCH,
VCS_COMMIT_ID,
VCS_REPOSITORY_URL,
)
from ....debugging import get_logger
from .util import get_command_output
class DetectionError(Exception):
pass
@attr.s
class Result(object):
"""" Repository information as queried by a detector """
url = attr.ib(default="")
branch = attr.ib(default="")
commit = attr.ib(default="")
root = attr.ib(default="")
status = attr.ib(default="")
diff = attr.ib(default="")
modified = attr.ib(default=False, type=bool, converter=bool)
def is_empty(self):
return not any(attr.asdict(self).values())
@six.add_metaclass(abc.ABCMeta)
class Detector(object):
""" Base class for repository detection """
"""
Commands are represented using the result class, where each attribute contains
the command used to obtain the value of the same attribute in the actual result.
"""
_fallback = '_fallback'
_remote = '_remote'
@classmethod
def _get_logger(cls):
return get_logger("Repository Detection")
@attr.s
class Commands(object):
"""" Repository information as queried by a detector """
url = attr.ib(default=None, type=list)
branch = attr.ib(default=None, type=list)
commit = attr.ib(default=None, type=list)
root = attr.ib(default=None, type=list)
status = attr.ib(default=None, type=list)
diff = attr.ib(default=None, type=list)
modified = attr.ib(default=None, type=list)
# alternative commands
branch_fallback = attr.ib(default=None, type=list)
diff_fallback = attr.ib(default=None, type=list)
# remote commands
commit_remote = attr.ib(default=None, type=list)
diff_remote = attr.ib(default=None, type=list)
diff_fallback_remote = attr.ib(default=None, type=list)
def __init__(self, type_name, name=None):
self.type_name = type_name
self.name = name or type_name
def _get_commands(self):
""" Returns a RepoInfo instance containing a command for each info attribute """
return self.Commands()
def _get_command_output(self, path, name, command, commands=None, strip=True):
""" Run a command and return its output """
try:
return get_command_output(command, path, strip=strip)
except (CalledProcessError, UnicodeDecodeError) as ex:
if not name.endswith(self._fallback):
fallback_command = attr.asdict(commands or self._get_commands()).get(name + self._fallback)
if fallback_command:
try:
return get_command_output(fallback_command, path, strip=strip)
except (CalledProcessError, UnicodeDecodeError):
pass
self._get_logger().warning("Can't get {} information for {} repo in {}".format(name, self.type_name, path))
# full details only in debug
self._get_logger().debug(
"Can't get {} information for {} repo in {}: {}".format(
name, self.type_name, path, str(ex)
)
)
return ""
def _get_info(self, path, include_diff=False, diff_from_remote=False):
"""
Get repository information.
:param path: Path to repository
:param include_diff: Whether to include the diff command's output (if available)
:param diff_from_remote: Whether to store the remote diff/commit based on the remote commit (not local commit)
:return: RepoInfo instance
"""
path = str(path)
commands = self._get_commands()
if not include_diff:
commands.diff = None
# skip the local commands
if diff_from_remote and commands:
for name, command in attr.asdict(commands).items():
if name.endswith(self._remote) and command:
setattr(commands, name[:-len(self._remote)], None)
info = Result(
**{
name: self._get_command_output(path, name, command, commands=commands, strip=bool(name != 'diff'))
for name, command in attr.asdict(commands).items()
if command and not name.endswith(self._fallback) and not name.endswith(self._remote)
}
)
if diff_from_remote and commands:
for name, command in attr.asdict(commands).items():
if name.endswith(self._remote) and command:
setattr(commands, name[:-len(self._remote)], command+[info.branch])
info = attr.assoc(
info,
**{
name[:-len(self._remote)]: self._get_command_output(
path, name[:-len(self._remote)], command + [info.branch],
commands=commands, strip=not name.startswith('diff'))
for name, command in attr.asdict(commands).items()
if command and (
name.endswith(self._remote) and
not name[:-len(self._remote)].endswith(self._fallback)
)
}
)
# make sure we match the modified with the git remote diff state
info.modified = bool(info.diff)
return info
def _post_process_info(self, info):
# check if there are uncommitted changes in the current repository
return info
def get_info(self, path, include_diff=False, diff_from_remote=False):
"""
Get repository information.
:param path: Path to repository
:param include_diff: Whether to include the diff command's output (if available)
:param diff_from_remote: Whether to store the remote diff/commit based on the remote commit (not local commit)
:return: RepoInfo instance
"""
info = self._get_info(path, include_diff, diff_from_remote=diff_from_remote)
return self._post_process_info(info)
def _is_repo_type(self, script_path):
try:
with open(os.devnull, "wb") as devnull:
return (
call(
[self.type_name, "status"],
stderr=devnull,
stdout=devnull,
cwd=str(script_path),
)
== 0
)
except CalledProcessError:
self._get_logger().warning("Can't get {} status".format(self.type_name))
except (OSError, EnvironmentError, IOError):
# File not found or can't be executed
pass
return False
def exists(self, script_path):
"""
Test whether the given script resides in
a repository type represented by this plugin.
"""
return self._is_repo_type(script_path)
class HgDetector(Detector):
def __init__(self):
super(HgDetector, self).__init__("hg")
def _get_commands(self):
return self.Commands(
url=["hg", "paths", "--verbose"],
branch=["hg", "--debug", "id", "-b"],
commit=["hg", "--debug", "id", "-i"],
root=["hg", "root"],
status=["hg", "status"],
diff=["hg", "diff"],
modified=["hg", "status", "-m"],
)
def _post_process_info(self, info):
if info.url:
info.url = info.url.split(" = ")[1]
if info.commit:
info.commit = info.commit.rstrip("+")
return info
class GitDetector(Detector):
def __init__(self):
super(GitDetector, self).__init__("git")
def _get_commands(self):
return self.Commands(
url=["git", "ls-remote", "--get-url", "origin"],
branch=["git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"],
commit=["git", "rev-parse", "HEAD"],
root=["git", "rev-parse", "--show-toplevel"],
status=["git", "status", "-s"],
diff=["git", "diff", "--submodule=diff"],
modified=["git", "ls-files", "-m"],
branch_fallback=["git", "rev-parse", "--abbrev-ref", "HEAD"],
diff_fallback=["git", "diff"],
diff_remote=["git", "diff", "--submodule=diff", ],
commit_remote=["git", "rev-parse", ],
diff_fallback_remote=["git", "diff", ],
)
def _post_process_info(self, info):
# Deprecated code: this was intended to make sure git repository names always
# ended with ".git", but this is not always the case (e.g. Azure Repos)
# if info.url and not info.url.endswith(".git"):
# info.url += ".git"
if (info.branch or "").startswith("origin/"):
info.branch = info.branch[len("origin/"):]
return info
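# Illustrative usage sketch (not part of the original module; the helper name
# and path are hypothetical): querying repository information for a working
# copy with the git detector; include_diff also collects the uncommitted diff.
def _example_git_detection(path="."):
    detector = GitDetector()
    if detector.exists(path):
        return detector.get_info(path, include_diff=True)
    return None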
class EnvDetector(Detector):
def __init__(self, type_name):
super(EnvDetector, self).__init__(type_name, "{} environment".format(type_name))
def _is_repo_type(self, script_path):
return VCS_REPO_TYPE.get().lower() == self.type_name and bool(
VCS_REPOSITORY_URL.get()
)
@staticmethod
def _normalize_root(root):
"""
Convert to absolute and squash 'path/../folder'
"""
# noinspection PyBroadException
try:
return os.path.abspath((Path.cwd() / root).absolute().as_posix())
except Exception:
return Path.cwd()
def _get_info(self, _, include_diff=False, diff_from_remote=None):
repository_url = VCS_REPOSITORY_URL.get()
if not repository_url:
raise DetectionError("No VCS environment data")
status = VCS_STATUS.get() or ''
diff = VCS_DIFF.get() or ''
modified = bool(diff or (status and [s for s in status.split('\n') if s.strip().startswith('M ')]))
if modified and not diff:
diff = '# Repository modified, but no git diff could be extracted.'
return Result(
url=repository_url,
branch=VCS_BRANCH.get(),
commit=VCS_COMMIT_ID.get(),
root=VCS_ROOT.get(converter=self._normalize_root),
status=status,
diff=diff,
modified=modified,
)
class GitEnvDetector(EnvDetector):
def __init__(self):
super(GitEnvDetector, self).__init__("git")
class HgEnvDetector(EnvDetector):
def __init__(self):
super(HgEnvDetector, self).__init__("hg")
| python | 10,736 |
'''
Created by auto_sdk on 2014.10.14
'''
from aliyun.api.base import RestApi
class Rds20130528RevokeAccountPrivilegeRequest(RestApi):
def __init__(self,domain='rds.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.AccountName = None
self.DBInstanceId = None
self.DBName = None
def getapiname(self):
return 'rds.aliyuncs.com.RevokeAccountPrivilege.2013-05-28'
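# Illustrative usage sketch (not part of the original auto-generated module).
# The attribute names come from __init__ above, the values are hypothetical,
# and actually sending the request is left to the RestApi base class, which is
# not shown here.
def _example_revoke_account_privilege():
    req = Rds20130528RevokeAccountPrivilegeRequest()
    req.AccountName = 'example_account'
    req.DBInstanceId = 'rm-example-instance'
    req.DBName = 'example_db'
    return req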
| python | 401 |
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for seq module."""
import array
import copy
import unittest
import warnings
from Bio import BiopythonWarning
from Bio import Alphabet
from Bio import Seq
from Bio.Alphabet import IUPAC, Gapped
from Bio.Data.IUPACData import (ambiguous_dna_complement,
ambiguous_rna_complement,
ambiguous_dna_values, ambiguous_rna_values)
from Bio.Data.CodonTable import TranslationError, standard_dna_table
from Bio.Seq import MutableSeq
test_seqs = [
Seq.Seq("TCAAAAGGATGCATCATG", IUPAC.unambiguous_dna),
Seq.Seq("T", IUPAC.ambiguous_dna),
Seq.Seq("ATGAAACTG"),
Seq.Seq("ATGAARCTG"),
Seq.Seq("AWGAARCKG"), # Note no U or T
Seq.Seq("".join(ambiguous_rna_values)),
Seq.Seq("".join(ambiguous_dna_values)),
Seq.Seq("".join(ambiguous_rna_values), Alphabet.generic_rna),
Seq.Seq("".join(ambiguous_dna_values), Alphabet.generic_dna),
Seq.Seq("".join(ambiguous_rna_values), IUPAC.IUPACAmbiguousRNA()),
Seq.Seq("".join(ambiguous_dna_values), IUPAC.IUPACAmbiguousDNA()),
Seq.Seq("AWGAARCKG", Alphabet.generic_dna),
Seq.Seq("AUGAAACUG", Alphabet.generic_rna),
Seq.Seq("ATGAAACTG", IUPAC.unambiguous_dna),
Seq.Seq("ATGAAA-CTG", Alphabet.Gapped(IUPAC.unambiguous_dna)),
Seq.Seq("ATGAAACTGWN", IUPAC.ambiguous_dna),
Seq.Seq("AUGAAACUG", Alphabet.generic_rna),
Seq.Seq("AUGAAA==CUG", Alphabet.Gapped(Alphabet.generic_rna, "=")),
Seq.Seq("AUGAAACUG", IUPAC.unambiguous_rna),
Seq.Seq("AUGAAACUGWN", IUPAC.ambiguous_rna),
Seq.Seq("ATGAAACTG", Alphabet.generic_nucleotide),
Seq.Seq("AUGAAACTG", Alphabet.generic_nucleotide), # U and T
Seq.MutableSeq("ATGAAACTG", Alphabet.generic_dna),
Seq.MutableSeq("AUGaaaCUG", IUPAC.unambiguous_rna),
Seq.Seq("ACTGTCGTCT", Alphabet.generic_protein),
]
protein_seqs = [
Seq.Seq("ATCGPK", IUPAC.protein),
Seq.Seq("T.CGPK", Alphabet.Gapped(IUPAC.protein, ".")),
Seq.Seq("T-CGPK", Alphabet.Gapped(IUPAC.protein, "-")),
Seq.Seq("MEDG-KRXR*",
Alphabet.Gapped(Alphabet.HasStopCodon(IUPAC.extended_protein, "*"),
"-")),
Seq.MutableSeq("ME-K-DRXR*XU",
Alphabet.Gapped(Alphabet.HasStopCodon(
IUPAC.extended_protein, "*"), "-")),
Seq.Seq("MEDG-KRXR@",
Alphabet.HasStopCodon(Alphabet.Gapped(IUPAC.extended_protein, "-"),
"@")),
Seq.Seq("ME-KR@",
Alphabet.HasStopCodon(Alphabet.Gapped(IUPAC.protein, "-"), "@")),
Seq.Seq("MEDG.KRXR@",
Alphabet.Gapped(Alphabet.HasStopCodon(IUPAC.extended_protein, "@"),
".")),
]
class TestSeq(unittest.TestCase):
def setUp(self):
self.s = Seq.Seq("TCAAAAGGATGCATCATG", IUPAC.unambiguous_dna)
def test_as_string(self):
"""Test converting Seq to string."""
self.assertEqual("TCAAAAGGATGCATCATG", str(self.s))
def test_construction_using_a_seq_object(self):
"""Test using a Seq object to initialize another Seq object."""
with self.assertRaises(TypeError):
Seq.Seq(self.s)
def test_repr(self):
"""Test representation of Seq object."""
self.assertEqual("Seq('TCAAAAGGATGCATCATG', IUPACUnambiguousDNA())",
repr(self.s))
def test_truncated_repr(self):
seq = "TCAAAAGGATGCATCATGTCAAAAGGATGCATCATGTCAAAAGGATGCATCATGTCAAAAGGA"
expected = (
"Seq('TCAAAAGGATGCATCATGTCAAAAGGATGCATCATGTCAAAAGGATGCATCATG...GGA', "
"IUPACAmbiguousDNA())"
)
self.assertEqual(expected, repr(Seq.Seq(seq, IUPAC.ambiguous_dna)))
def test_length(self):
"""Test len method on Seq object."""
self.assertEqual(18, len(self.s))
def test_first_nucleotide(self):
"""Test getting first nucleotide of Seq."""
self.assertEqual("T", self.s[0])
def test_last_nucleotide(self):
"""Test getting last nucleotide of Seq."""
self.assertEqual("G", self.s[-1])
def test_slicing(self):
"""Test slicing of Seq."""
self.assertEqual("AA", str(self.s[3:5]))
def test_reverse(self):
"""Test reverse using -1 stride."""
self.assertEqual("GTACTACGTAGGAAAACT", self.s[::-1])
def test_extract_third_nucleotide(self):
"""Test extracting every third nucleotide (slicing with stride 3)."""
self.assertEqual("TAGTAA", str(self.s[0::3]))
self.assertEqual("CAGGTT", str(self.s[1::3]))
self.assertEqual("AAACCG", str(self.s[2::3]))
def test_alphabet_letters(self):
"""Test nucleotides in DNA Seq."""
self.assertEqual("GATC", self.s.alphabet.letters)
def test_alphabet(self):
"""Test alphabet of derived Seq object."""
t = Seq.Seq("T", IUPAC.unambiguous_dna)
u = self.s + t
self.assertEqual("IUPACUnambiguousDNA()", str(u.alphabet))
def test_length_concatenated_unambiguous_seq(self):
"""Test length of concatenated Seq object with unambiguous DNA."""
t = Seq.Seq("T", IUPAC.unambiguous_dna)
u = self.s + t
self.assertEqual(19, len(u))
def test_concatenation_of_seq(self):
t = Seq.Seq("T", IUPAC.unambiguous_dna)
u = self.s + t
self.assertEqual(str(self.s) + "T", str(u))
def test_concatenation_error(self):
"""DNA Seq objects cannot be concatenated with Protein Seq objects."""
with self.assertRaises(TypeError):
self.s + Seq.Seq("T", IUPAC.protein)
def test_concatenation_of_ambiguous_and_unambiguous_dna(self):
"""Concatenate Seq object with ambiguous and unambiguous DNA returns ambiguous Seq."""
t = Seq.Seq("T", IUPAC.ambiguous_dna)
u = self.s + t
self.assertEqual("IUPACAmbiguousDNA()", str(u.alphabet))
def test_ungap(self):
self.assertEqual("ATCCCA", str(Seq.Seq("ATC-CCA").ungap("-")))
with self.assertRaises(ValueError):
Seq.Seq("ATC-CCA").ungap("--")
with self.assertRaises(ValueError):
Seq.Seq("ATC-CCA").ungap()
class TestSeqStringMethods(unittest.TestCase):
def setUp(self):
self.s = Seq.Seq("TCAAAAGGATGCATCATG", IUPAC.unambiguous_dna)
self.dna = [
Seq.Seq("ATCG", IUPAC.ambiguous_dna),
Seq.Seq("gtca", Alphabet.generic_dna),
Seq.MutableSeq("GGTCA", Alphabet.generic_dna),
Seq.Seq("CTG-CA", Alphabet.Gapped(IUPAC.unambiguous_dna, "-")),
]
self.rna = [
Seq.Seq("AUUUCG", IUPAC.ambiguous_rna),
Seq.MutableSeq("AUUCG", IUPAC.ambiguous_rna),
Seq.Seq("uCAg", Alphabet.generic_rna),
Seq.MutableSeq("UC-AG",
Alphabet.Gapped(Alphabet.generic_rna, "-")),
Seq.Seq("U.CAG", Alphabet.Gapped(Alphabet.generic_rna, ".")),
]
self.nuc = [Seq.Seq("ATCG", Alphabet.generic_nucleotide)]
self.protein = [
Seq.Seq("ATCGPK", IUPAC.protein),
Seq.Seq("atcGPK", Alphabet.generic_protein),
Seq.Seq("T.CGPK", Alphabet.Gapped(IUPAC.protein, ".")),
Seq.Seq("T-CGPK", Alphabet.Gapped(IUPAC.protein, "-")),
Seq.Seq("MEDG-KRXR*",
Alphabet.Gapped(
Alphabet.HasStopCodon(IUPAC.extended_protein, "*"),
"-")),
Seq.MutableSeq("ME-K-DRXR*XU",
Alphabet.Gapped(
Alphabet.HasStopCodon(IUPAC.extended_protein,
"*"), "-")),
Seq.Seq("MEDG-KRXR@",
Alphabet.HasStopCodon(
Alphabet.Gapped(IUPAC.extended_protein, "-"), "@")),
Seq.Seq("ME-KR@",
Alphabet.HasStopCodon(Alphabet.Gapped(IUPAC.protein, "-"),
"@")),
Seq.Seq("MEDG.KRXR@",
Alphabet.Gapped(Alphabet.HasStopCodon(
IUPAC.extended_protein, "@"), ".")),
]
self.test_chars = ["-", Seq.Seq("-"), Seq.Seq("*"), "-X@"]
def test_string_methods(self):
for a in self.dna + self.rna + self.nuc + self.protein:
if isinstance(a, Seq.Seq):
self.assertEqual(str(a.strip()), str(a).strip())
self.assertEqual(str(a.lstrip()), str(a).lstrip())
self.assertEqual(str(a.rstrip()), str(a).rstrip())
self.assertEqual(str(a.lower()), str(a).lower())
self.assertEqual(str(a.upper()), str(a).upper())
def test_hash(self):
with warnings.catch_warnings(record=True):
hash(self.s)
def test_equal_comparison_of_incompatible_alphabets(self):
"""Test __eq__ comparison method."""
with warnings.catch_warnings(record=True):
Seq.Seq("TCAAAA", IUPAC.ambiguous_dna) == \
Seq.Seq("TCAAAA", IUPAC.ambiguous_rna)
    def test_not_equal_comparison(self):
"""Test __ne__ comparison method."""
self.assertNotEqual(Seq.Seq("TCAAA", IUPAC.ambiguous_dna),
Seq.Seq("TCAAAA", IUPAC.ambiguous_dna))
def test_less_than_comparison(self):
"""Test __lt__ comparison method."""
self.assertTrue(self.s[:-1] < self.s)
def test_less_than_comparison_of_incompatible_alphabets(self):
"""Test incompatible alphabet __lt__ comparison method."""
seq1 = Seq.Seq("TCAAA", IUPAC.ambiguous_dna)
seq2 = Seq.Seq("UCAAAA", IUPAC.ambiguous_rna)
with self.assertWarns(BiopythonWarning):
self.assertTrue(seq1 < seq2)
def test_less_than_comparison_of_incompatible_types(self):
"""Test incompatible types __lt__ comparison method."""
with self.assertRaises(TypeError):
self.s < 1
def test_less_than_or_equal_comparison(self):
"""Test __le__ comparison method."""
self.assertTrue(self.s <= self.s)
def test_less_than_or_equal_comparison_of_incompatible_alphabets(self):
"""Test incompatible alphabet __le__ comparison method."""
seq1 = Seq.Seq("TCAAA", IUPAC.ambiguous_dna)
seq2 = Seq.Seq("UCAAAA", IUPAC.ambiguous_rna)
with self.assertWarns(BiopythonWarning):
self.assertTrue(seq1 <= seq2)
def test_less_than_or_equal_comparison_of_incompatible_types(self):
"""Test incompatible types __le__ comparison method."""
with self.assertRaises(TypeError):
self.s <= 1
def test_greater_than_comparison(self):
"""Test __gt__ comparison method."""
self.assertTrue(self.s > self.s[:-1])
def test_greater_than_comparison_of_incompatible_alphabets(self):
"""Test incompatible alphabet __gt__ comparison method."""
seq1 = Seq.Seq("TCAAA", IUPAC.ambiguous_dna)
seq2 = Seq.Seq("UCAAAA", IUPAC.ambiguous_rna)
with self.assertWarns(BiopythonWarning):
self.assertTrue(seq2 > seq1)
def test_greater_than_comparison_of_incompatible_types(self):
"""Test incompatible types __gt__ comparison method."""
with self.assertRaises(TypeError):
self.s > 1
def test_greater_than_or_equal_comparison(self):
"""Test __ge__ comparison method."""
self.assertTrue(self.s >= self.s)
def test_greater_than_or_equal_comparison_of_incompatible_alphabets(self):
"""Test incompatible alphabet __ge__ comparison method."""
seq1 = Seq.Seq("TCAAA", IUPAC.ambiguous_dna)
seq2 = Seq.Seq("UCAAAA", IUPAC.ambiguous_rna)
with self.assertWarns(BiopythonWarning):
self.assertTrue(seq2 >= seq1)
def test_greater_than_or_equal_comparison_of_incompatible_types(self):
"""Test incompatible types __ge__ comparison method."""
with self.assertRaises(TypeError):
self.s >= 1
def test_add_method_using_wrong_object(self):
with self.assertRaises(TypeError):
self.s + {}
def test_radd_method(self):
self.assertEqual("TCAAAAGGATGCATCATGTCAAAAGGATGCATCATG",
str(self.s.__radd__(self.s)))
def test_radd_method_using_incompatible_alphabets(self):
rna_seq = Seq.Seq("UCAAAA", IUPAC.ambiguous_rna)
with self.assertRaises(TypeError):
self.s.__radd__(rna_seq)
def test_radd_method_using_wrong_object(self):
with self.assertRaises(TypeError):
self.s.__radd__({})
def test_contains_method(self):
self.assertIn("AAAA", self.s)
def test_startswith(self):
self.assertTrue(self.s.startswith("TCA"))
self.assertTrue(self.s.startswith(("CAA", "CTA"), 1))
def test_endswith(self):
self.assertTrue(self.s.endswith("ATG"))
self.assertTrue(self.s.endswith(("ATG", "CTA")))
def test_append_nucleotides(self):
self.test_chars.append(Seq.Seq("A", IUPAC.ambiguous_dna))
self.test_chars.append(Seq.Seq("A", IUPAC.ambiguous_rna))
self.test_chars.append(Seq.Seq("A", Alphabet.generic_nucleotide))
self.assertEqual(7, len(self.test_chars))
def test_append_proteins(self):
self.test_chars.append(Seq.Seq("K", Alphabet.generic_protein))
self.test_chars.append(Seq.Seq("K-",
Alphabet.Gapped(
Alphabet.generic_protein, "-")))
self.test_chars.append(Seq.Seq("K@",
Alphabet.Gapped(IUPAC.protein, "@")))
self.assertEqual(7, len(self.test_chars))
def test_exception_when_clashing_alphabets(self):
"""Test by setting up clashing alphabet sequences."""
b = Seq.Seq("-", Alphabet.generic_nucleotide)
self.assertRaises(TypeError, self.protein[0].strip, b)
b = Seq.Seq("-", Alphabet.generic_protein)
self.assertRaises(TypeError, self.dna[0].strip, b)
def test_stripping_characters(self):
for a in self.dna + self.rna + self.nuc + self.protein:
for char in self.test_chars:
str_char = str(char)
if isinstance(a, Seq.Seq):
self.assertEqual(str(a.strip(char)),
str(a).strip(str_char))
self.assertEqual(str(a.lstrip(char)),
str(a).lstrip(str_char))
self.assertEqual(str(a.rstrip(char)),
str(a).rstrip(str_char))
def test_finding_characters(self):
for a in self.dna + self.rna + self.nuc + self.protein:
for char in self.test_chars:
str_char = str(char)
if isinstance(a, Seq.Seq):
self.assertEqual(a.find(char), str(a).find(str_char))
self.assertEqual(a.find(char, 2, -2),
str(a).find(str_char, 2, -2))
self.assertEqual(a.rfind(char), str(a).rfind(str_char))
self.assertEqual(a.rfind(char, 2, -2),
str(a).rfind(str_char, 2, -2))
def test_counting_characters(self):
for a in self.dna + self.rna + self.nuc + self.protein:
for char in self.test_chars:
str_char = str(char)
if isinstance(a, Seq.Seq):
self.assertEqual(a.count(char), str(a).count(str_char))
self.assertEqual(a.count(char, 2, -2),
str(a).count(str_char, 2, -2))
def test_splits(self):
for a in self.dna + self.rna + self.nuc + self.protein:
for char in self.test_chars:
str_char = str(char)
if isinstance(a, Seq.Seq):
self.assertEqual([str(x) for x in a.split(char)],
str(a).split(str_char))
self.assertEqual([str(x) for x in a.rsplit(char)],
str(a).rsplit(str_char))
for max_sep in [0, 1, 2, 999]:
self.assertEqual(
[str(x) for x in a.split(char, max_sep)],
str(a).split(str_char, max_sep))
class TestSeqAddition(unittest.TestCase):
def setUp(self):
self.dna = [
Seq.Seq("ATCG", IUPAC.ambiguous_dna),
Seq.Seq("gtca", Alphabet.generic_dna),
Seq.MutableSeq("GGTCA", Alphabet.generic_dna),
Seq.Seq("CTG-CA", Alphabet.Gapped(IUPAC.unambiguous_dna, "-")),
"TGGTCA",
]
self.rna = [
Seq.Seq("AUUUCG", IUPAC.ambiguous_rna),
Seq.MutableSeq("AUUCG", IUPAC.ambiguous_rna),
Seq.Seq("uCAg", Alphabet.generic_rna),
Seq.MutableSeq("UC-AG",
Alphabet.Gapped(Alphabet.generic_rna, "-")),
Seq.Seq("U.CAG",
Alphabet.Gapped(Alphabet.generic_rna, ".")),
"UGCAU",
]
self.nuc = [
Seq.Seq("ATCG", Alphabet.generic_nucleotide),
"UUUTTTACG",
]
self.protein = [
Seq.Seq("ATCGPK", IUPAC.protein),
Seq.Seq("atcGPK", Alphabet.generic_protein),
Seq.Seq("T.CGPK", Alphabet.Gapped(IUPAC.protein, ".")),
Seq.Seq("T-CGPK", Alphabet.Gapped(IUPAC.protein, "-")),
Seq.Seq("MEDG-KRXR*",
Alphabet.Gapped(Alphabet.HasStopCodon(
IUPAC.extended_protein, "*"), "-")),
Seq.MutableSeq("ME-K-DRXR*XU",
Alphabet.Gapped(Alphabet.HasStopCodon(
IUPAC.extended_protein, "*"), "-")),
"TEDDF",
]
def test_addition_dna_rna_with_generic_nucleotides(self):
for a in self.dna + self.rna:
for b in self.nuc:
c = a + b
self.assertEqual(str(c), str(a) + str(b))
def test_addition_dna_rna_with_generic_nucleotides_inplace(self):
for a in self.dna + self.rna:
for b in self.nuc:
c = b + a
b += a # can't change 'a' as need value next iteration
self.assertEqual(c, b)
def test_addition_rna_with_rna(self):
self.rna.pop(3)
for a in self.rna:
for b in self.rna:
c = a + b
self.assertEqual(str(c), str(a) + str(b))
def test_addition_rna_with_rna_inplace(self):
self.rna.pop(3)
for a in self.rna:
for b in self.rna:
c = b + a
b += a
self.assertEqual(c, b)
def test_exception_when_added_rna_has_more_than_one_gap_type(self):
"""Test resulting sequence has gap types '-' and '.'."""
with self.assertRaises(ValueError):
self.rna[3] + self.rna[4]
with self.assertRaises(ValueError):
self.rna[3] += self.rna[4]
def test_addition_dna_with_dna(self):
for a in self.dna:
for b in self.dna:
c = a + b
self.assertEqual(str(c), str(a) + str(b))
def test_addition_dna_with_dna_inplace(self):
for a in self.dna:
for b in self.dna:
c = b + a
b += a
self.assertEqual(c, b)
def test_addition_dna_with_rna(self):
self.dna.pop(4)
self.rna.pop(5)
for a in self.dna:
for b in self.rna:
with self.assertRaises(TypeError):
a + b
with self.assertRaises(TypeError):
b + a
with self.assertRaises(TypeError):
a += b
with self.assertRaises(TypeError):
b += a
def test_addition_proteins(self):
self.protein.pop(2)
for a in self.protein:
for b in self.protein:
c = a + b
self.assertEqual(str(c), str(a) + str(b))
def test_addition_proteins_inplace(self):
self.protein.pop(2)
for a in self.protein:
for b in self.protein:
c = b + a
b += a
self.assertEqual(c, b)
def test_exception_when_added_protein_has_more_than_one_gap_type(self):
"""Test resulting protein has gap types '-' and '.'."""
a = Seq.Seq("T.CGPK", Alphabet.Gapped(IUPAC.protein, "."))
b = Seq.Seq("T-CGPK", Alphabet.Gapped(IUPAC.protein, "-"))
with self.assertRaises(ValueError):
a + b
with self.assertRaises(ValueError):
a += b
def test_exception_when_added_protein_has_several_stop_codon_types(self):
"""Test resulting protein has stop codon types '*' and '@'."""
a = Seq.Seq("MEDG-KRXR@", Alphabet.HasStopCodon(
Alphabet.Gapped(IUPAC.extended_protein, "-"), "@"))
b = Seq.Seq("MEDG-KRXR*", Alphabet.Gapped(
Alphabet.HasStopCodon(IUPAC.extended_protein, "*"), "-"))
with self.assertRaises(ValueError):
a + b
with self.assertRaises(ValueError):
a += b
def test_exception_when_adding_protein_with_nucleotides(self):
for a in self.protein[0:5]:
for b in self.dna[0:3] + self.rna[0:4]:
with self.assertRaises(TypeError):
a + b
with self.assertRaises(TypeError):
a += b
def test_adding_generic_nucleotide_with_other_nucleotides(self):
for a in self.nuc:
for b in self.dna + self.rna + self.nuc:
c = a + b
self.assertEqual(str(c), str(a) + str(b))
def test_adding_generic_nucleotide_with_other_nucleotides_inplace(self):
for a in self.nuc:
for b in self.dna + self.rna + self.nuc:
c = b + a
b += a
self.assertEqual(c, b)
class TestSeqMultiplication(unittest.TestCase):
def test_mul_method(self):
"""Test mul method; relies on addition method."""
for seq in test_seqs + protein_seqs:
self.assertEqual(seq * 3, seq + seq + seq)
def test_mul_method_exceptions(self):
"""Test mul method exceptions."""
for seq in test_seqs + protein_seqs:
with self.assertRaises(TypeError):
seq * 3.0
with self.assertRaises(TypeError):
seq * ""
def test_rmul_method(self):
"""Test rmul method; relies on addition method."""
for seq in test_seqs + protein_seqs:
self.assertEqual(3 * seq, seq + seq + seq)
def test_rmul_method_exceptions(self):
"""Test rmul method exceptions."""
for seq in test_seqs + protein_seqs:
with self.assertRaises(TypeError):
3.0 * seq
with self.assertRaises(TypeError):
"" * seq
def test_imul_method(self):
"""Test imul method; relies on addition and mull methods."""
for seq in test_seqs + protein_seqs:
original_seq = seq * 1 # make a copy
seq *= 3
self.assertEqual(seq, original_seq + original_seq + original_seq)
def test_imul_method_exceptions(self):
"""Test imul method exceptions."""
for seq in test_seqs + protein_seqs:
with self.assertRaises(TypeError):
seq *= 3.0
with self.assertRaises(TypeError):
seq *= ""
class TestMutableSeq(unittest.TestCase):
def setUp(self):
self.s = Seq.Seq("TCAAAAGGATGCATCATG", IUPAC.unambiguous_dna)
self.mutable_s = MutableSeq("TCAAAAGGATGCATCATG", IUPAC.ambiguous_dna)
def test_mutableseq_creation(self):
"""Test creating MutableSeqs in multiple ways."""
mutable_s = MutableSeq("TCAAAAGGATGCATCATG", IUPAC.ambiguous_dna)
self.assertIsInstance(mutable_s, MutableSeq, "Creating MutableSeq")
mutable_s = self.s.tomutable()
self.assertIsInstance(mutable_s, MutableSeq,
"Converting Seq to mutable")
array_seq = MutableSeq(array.array("u", "TCAAAAGGATGCATCATG"),
IUPAC.ambiguous_dna)
self.assertIsInstance(array_seq, MutableSeq,
"Creating MutableSeq using array")
def test_repr(self):
self.assertEqual(
"MutableSeq('TCAAAAGGATGCATCATG', IUPACAmbiguousDNA())",
repr(self.mutable_s))
def test_truncated_repr(self):
seq = "TCAAAAGGATGCATCATGTCAAAAGGATGCATCATGTCAAAAGGATGCATCATGTCAAAAGGA"
expected = (
"MutableSeq('TCAAAAGGATGCATCATGTCAAAAGGATGCATCATGTCAAAAGGATGCATCATG...GGA', "
"IUPACAmbiguousDNA())"
)
self.assertEqual(expected, repr(MutableSeq(seq, IUPAC.ambiguous_dna)))
def test_equal_comparison(self):
"""Test __eq__ comparison method."""
self.assertEqual(self.mutable_s, "TCAAAAGGATGCATCATG")
def test_equal_comparison_of_incompatible_alphabets(self):
with self.assertWarns(BiopythonWarning):
self.mutable_s == MutableSeq("UCAAAAGGA", IUPAC.ambiguous_rna)
def test_not_equal_comparison(self):
"""Test __ne__ comparison method."""
self.assertNotEqual(self.mutable_s, "other thing")
def test_less_than_comparison(self):
"""Test __lt__ comparison method."""
self.assertTrue(self.mutable_s[:-1] < self.mutable_s)
def test_less_than_comparison_of_incompatible_alphabets(self):
with self.assertWarns(BiopythonWarning):
self.mutable_s[:-1] < MutableSeq("UCAAAAGGAUGCAUCAUG",
IUPAC.ambiguous_rna)
def test_less_than_comparison_of_incompatible_types(self):
with self.assertRaises(TypeError):
self.mutable_s < 1
def test_less_than_comparison_without_alphabet(self):
self.assertTrue(self.mutable_s[:-1] < "TCAAAAGGATGCATCATG")
def test_less_than_or_equal_comparison(self):
"""Test __le__ comparison method."""
self.assertTrue(self.mutable_s[:-1] <= self.mutable_s)
def test_less_than_or_equal_comparison_of_incompatible_alphabets(self):
with self.assertWarns(BiopythonWarning):
self.mutable_s[:-1] <= MutableSeq("UCAAAAGGAUGCAUCAUG",
IUPAC.ambiguous_rna)
def test_less_than_or_equal_comparison_of_incompatible_types(self):
with self.assertRaises(TypeError):
self.mutable_s <= 1
def test_less_than_or_equal_comparison_without_alphabet(self):
self.assertTrue(self.mutable_s[:-1] <= "TCAAAAGGATGCATCATG")
def test_greater_than_comparison(self):
"""Test __gt__ comparison method."""
self.assertTrue(self.mutable_s > self.mutable_s[:-1])
def test_greater_than_comparison_of_incompatible_alphabets(self):
with self.assertWarns(BiopythonWarning):
self.mutable_s[:-1] > MutableSeq("UCAAAAGGAUGCAUCAUG",
IUPAC.ambiguous_rna)
def test_greater_than_comparison_of_incompatible_types(self):
with self.assertRaises(TypeError):
self.mutable_s > 1
def test_greater_than_comparison_without_alphabet(self):
self.assertTrue(self.mutable_s > "TCAAAAGGATGCATCAT")
def test_greater_than_or_equal_comparison(self):
"""Test __ge__ comparison method."""
self.assertTrue(self.mutable_s >= self.mutable_s)
def test_greater_than_or_equal_comparison_of_incompatible_alphabets(self):
with self.assertWarns(BiopythonWarning):
self.mutable_s[:-1] >= MutableSeq("UCAAAAGGAUGCAUCAUG",
IUPAC.ambiguous_rna)
def test_greater_than_or_equal_comparison_of_incompatible_types(self):
with self.assertRaises(TypeError):
self.mutable_s >= 1
def test_greater_than_or_equal_comparison_without_alphabet(self):
self.assertTrue(self.mutable_s >= "TCAAAAGGATGCATCATG")
def test_add_method(self):
"""Test adding wrong type to MutableSeq."""
with self.assertRaises(TypeError):
self.mutable_s + 1234
def test_radd_method(self):
self.assertEqual("TCAAAAGGATGCATCATGTCAAAAGGATGCATCATG",
self.mutable_s.__radd__(self.mutable_s))
def test_radd_method_incompatible_alphabets(self):
with self.assertRaises(TypeError):
self.mutable_s.__radd__(MutableSeq("UCAAAAGGA",
IUPAC.ambiguous_rna))
def test_radd_method_using_seq_object(self):
self.assertEqual("TCAAAAGGATGCATCATGTCAAAAGGATGCATCATG",
self.mutable_s.__radd__(self.s))
def test_radd_method_wrong_type(self):
with self.assertRaises(TypeError):
self.mutable_s.__radd__(1234)
def test_as_string(self):
self.assertEqual("TCAAAAGGATGCATCATG", str(self.mutable_s))
def test_length(self):
self.assertEqual(18, len(self.mutable_s))
def test_converting_to_immutable(self):
self.assertIsInstance(self.mutable_s.toseq(), Seq.Seq)
def test_first_nucleotide(self):
self.assertEqual("T", self.mutable_s[0])
def test_setting_slices(self):
self.assertEqual(MutableSeq("CAAA", IUPAC.ambiguous_dna),
self.mutable_s[1:5], "Slice mutable seq")
self.mutable_s[1:3] = "GAT"
self.assertEqual(MutableSeq("TGATAAAGGATGCATCATG",
IUPAC.ambiguous_dna),
self.mutable_s,
"Set slice with string and adding extra nucleotide")
self.mutable_s[1:3] = self.mutable_s[5:7]
self.assertEqual(MutableSeq("TAATAAAGGATGCATCATG",
IUPAC.ambiguous_dna),
self.mutable_s, "Set slice with MutableSeq")
self.mutable_s[1:3] = array.array("u", "GAT")
self.assertEqual(MutableSeq("TGATTAAAGGATGCATCATG",
IUPAC.ambiguous_dna),
self.mutable_s, "Set slice with array")
def test_setting_item(self):
self.mutable_s[3] = "G"
self.assertEqual(MutableSeq("TCAGAAGGATGCATCATG", IUPAC.ambiguous_dna),
self.mutable_s)
def test_deleting_slice(self):
del self.mutable_s[4:5]
self.assertEqual(MutableSeq("TCAAAGGATGCATCATG", IUPAC.ambiguous_dna),
self.mutable_s)
def test_deleting_item(self):
del self.mutable_s[3]
self.assertEqual(MutableSeq("TCAAAGGATGCATCATG", IUPAC.ambiguous_dna),
self.mutable_s)
def test_appending(self):
self.mutable_s.append("C")
self.assertEqual(MutableSeq("TCAAAAGGATGCATCATGC",
IUPAC.ambiguous_dna),
self.mutable_s)
def test_inserting(self):
self.mutable_s.insert(4, "G")
self.assertEqual(MutableSeq("TCAAGAAGGATGCATCATG",
IUPAC.ambiguous_dna),
self.mutable_s)
def test_popping_last_item(self):
self.assertEqual("G", self.mutable_s.pop())
def test_remove_items(self):
self.mutable_s.remove("G")
self.assertEqual(MutableSeq("TCAAAAGATGCATCATG", IUPAC.ambiguous_dna),
self.mutable_s, "Remove first G")
self.assertRaises(ValueError, self.mutable_s.remove, "Z")
def test_count(self):
self.assertEqual(7, self.mutable_s.count("A"))
self.assertEqual(2, self.mutable_s.count("AA"))
def test_index(self):
self.assertEqual(2, self.mutable_s.index("A"))
self.assertRaises(ValueError, self.mutable_s.index, "8888")
def test_reverse(self):
"""Test using reverse method."""
self.mutable_s.reverse()
self.assertEqual(MutableSeq("GTACTACGTAGGAAAACT", IUPAC.ambiguous_dna),
self.mutable_s)
def test_reverse_with_stride(self):
"""Test reverse using -1 stride."""
self.assertEqual(MutableSeq("GTACTACGTAGGAAAACT", IUPAC.ambiguous_dna),
self.mutable_s[::-1])
def test_complement(self):
self.mutable_s.complement()
self.assertEqual("AGTTTTCCTACGTAGTAC", str(self.mutable_s))
def test_complement_rna(self):
seq = Seq.MutableSeq("AUGaaaCUG", IUPAC.unambiguous_rna)
seq.complement()
self.assertEqual("UACuuuGAC", str(seq))
    def test_complement_mixed_alphabets(self):
seq = Seq.MutableSeq("AUGaaaCTG")
with self.assertRaises(ValueError):
seq.complement()
def test_complement_rna_string(self):
seq = Seq.MutableSeq("AUGaaaCUG")
seq.complement()
self.assertEqual("UACuuuGAC", str(seq))
def test_complement_dna_string(self):
seq = Seq.MutableSeq("ATGaaaCTG")
seq.complement()
self.assertEqual("TACtttGAC", str(seq))
def test_reverse_complement(self):
self.mutable_s.reverse_complement()
self.assertEqual("CATGATGCATCCTTTTGA", str(self.mutable_s))
def test_reverse_complement_of_protein(self):
seq = Seq.MutableSeq("ACTGTCGTCT", Alphabet.generic_protein)
with self.assertRaises(ValueError):
seq.reverse_complement()
def test_extend_method(self):
self.mutable_s.extend("GAT")
self.assertEqual(MutableSeq("TCAAAAGGATGCATCATGGAT",
IUPAC.ambiguous_dna),
self.mutable_s)
def test_extend_with_mutable_seq(self):
self.mutable_s.extend(MutableSeq("TTT", IUPAC.ambiguous_dna))
self.assertEqual(MutableSeq("TCAAAAGGATGCATCATGTTT",
IUPAC.ambiguous_dna),
self.mutable_s)
def test_delete_stride_slice(self):
del self.mutable_s[4:6 - 1]
self.assertEqual(MutableSeq("TCAAAGGATGCATCATG", IUPAC.ambiguous_dna),
self.mutable_s)
def test_extract_third_nucleotide(self):
"""Test extracting every third nucleotide (slicing with stride 3)."""
self.assertEqual(MutableSeq("TAGTAA", IUPAC.ambiguous_dna),
self.mutable_s[0::3])
self.assertEqual(MutableSeq("CAGGTT", IUPAC.ambiguous_dna),
self.mutable_s[1::3])
self.assertEqual(MutableSeq("AAACCG", IUPAC.ambiguous_dna),
self.mutable_s[2::3])
def test_set_wobble_codon_to_n(self):
"""Test setting wobble codon to N (set slice with stride 3)."""
self.mutable_s[2::3] = "N" * len(self.mutable_s[2::3])
self.assertEqual(MutableSeq("TCNAANGGNTGNATNATN", IUPAC.ambiguous_dna),
self.mutable_s)
class TestUnknownSeq(unittest.TestCase):
def setUp(self):
self.s = Seq.UnknownSeq(6)
def test_construction(self):
self.assertEqual("??????", str(Seq.UnknownSeq(6)))
self.assertEqual("NNNNNN",
str(Seq.UnknownSeq(6, Alphabet.generic_dna)))
self.assertEqual("XXXXXX",
str(Seq.UnknownSeq(6, Alphabet.generic_protein)))
self.assertEqual("??????", str(Seq.UnknownSeq(6, character="?")))
with self.assertRaises(ValueError):
Seq.UnknownSeq(-10)
with self.assertRaises(ValueError):
Seq.UnknownSeq(6, character="??")
def test_length(self):
self.assertEqual(6, len(self.s))
def test_repr(self):
self.assertEqual(
"UnknownSeq(6, character='?')",
repr(self.s))
def test_add_method(self):
seq1 = Seq.UnknownSeq(3, Alphabet.generic_dna)
self.assertEqual("??????NNN", str(self.s + seq1))
seq2 = Seq.UnknownSeq(3, Alphabet.generic_dna)
self.assertEqual("NNNNNN", str(seq1 + seq2))
def test_getitem_method(self):
self.assertEqual("", self.s[-1:-1])
self.assertEqual("?", self.s[1])
self.assertEqual("?", self.s[5:])
self.assertEqual("?", self.s[:1])
self.assertEqual("??", self.s[1:3])
self.assertEqual("???", self.s[1:6:2])
self.assertEqual("????", self.s[1:-1])
with self.assertRaises(ValueError):
self.s[1:6:0]
def test_count(self):
self.assertEqual(6, self.s.count("?"))
self.assertEqual(3, self.s.count("??"))
self.assertEqual(0, Seq.UnknownSeq(6, character="N").count("?"))
self.assertEqual(0, Seq.UnknownSeq(6, character="N").count("??"))
self.assertEqual(4,
Seq.UnknownSeq(6, character="?").count("?", start=2))
self.assertEqual(2,
Seq.UnknownSeq(6, character="?").count("??", start=2))
def test_complement(self):
self.s.complement()
self.assertEqual("??????", str(self.s))
def test_complement_of_protein(self):
"""Check reverse complement fails on a protein."""
seq = Seq.UnknownSeq(6, Alphabet.generic_protein)
with self.assertRaises(ValueError):
seq.complement()
def test_reverse_complement(self):
self.s.reverse_complement()
self.assertEqual("??????", str(self.s))
def test_reverse_complement_of_protein(self):
seq = Seq.UnknownSeq(6, Alphabet.generic_protein)
self.assertRaises(ValueError, seq.reverse_complement)
def test_transcribe(self):
self.assertEqual("??????", self.s.transcribe())
def test_back_transcribe(self):
self.assertEqual("??????", self.s.back_transcribe())
def test_upper(self):
seq = Seq.UnknownSeq(6, Alphabet.generic_dna)
self.assertEqual("NNNNNN", str(seq.upper()))
def test_lower(self):
seq = Seq.UnknownSeq(6, Alphabet.generic_dna)
self.assertEqual("nnnnnn", str(seq.lower()))
def test_translation(self):
self.assertEqual("XX", str(self.s.translate()))
def test_translation_of_proteins(self):
seq = Seq.UnknownSeq(6, IUPAC.protein)
self.assertRaises(ValueError, seq.translate)
def test_ungap(self):
seq = Seq.UnknownSeq(7,
alphabet=Alphabet.Gapped(Alphabet.DNAAlphabet(),
"-"))
self.assertEqual("NNNNNNN", str(seq.ungap("-")))
seq = Seq.UnknownSeq(20,
alphabet=Alphabet.Gapped(Alphabet.DNAAlphabet(),
"-"), character="-")
self.assertEqual("", seq.ungap("-"))
class TestAmbiguousComplements(unittest.TestCase):
def test_ambiguous_values(self):
"""Test that other tests do not introduce characters to our values."""
self.assertFalse("-" in ambiguous_dna_values)
self.assertFalse("?" in ambiguous_dna_values)
class TestComplement(unittest.TestCase):
def test_complement_ambiguous_dna_values(self):
for ambig_char, values in sorted(ambiguous_dna_values.items()):
compl_values = str(
Seq.Seq(values, alphabet=IUPAC.ambiguous_dna).complement())
ambig_values = (
ambiguous_dna_values[ambiguous_dna_complement[ambig_char]])
self.assertEqual(set(compl_values), set(ambig_values))
def test_complement_ambiguous_rna_values(self):
for ambig_char, values in sorted(ambiguous_rna_values.items()):
compl_values = str(
Seq.Seq(values, alphabet=IUPAC.ambiguous_rna).complement())
ambig_values = (
ambiguous_rna_values[ambiguous_rna_complement[ambig_char]])
self.assertEqual(set(compl_values), set(ambig_values))
def test_complement_incompatible_alphabets(self):
seq = Seq.Seq("CAGGTU")
with self.assertRaises(ValueError):
seq.complement()
def test_complement_of_mixed_dna_rna(self):
seq = "AUGAAACTG" # U and T
self.assertRaises(ValueError, Seq.complement, seq)
def test_complement_of_rna(self):
seq = "AUGAAACUG"
self.assertEqual("UACUUUGAC", Seq.complement(seq))
def test_complement_of_dna(self):
seq = "ATGAAACTG"
self.assertEqual("TACTTTGAC", Seq.complement(seq))
def test_complement_on_proteins(self):
"""Check complement fails on a protein."""
for s in protein_seqs:
with self.assertRaises(ValueError):
Seq.complement(s)
with self.assertRaises(ValueError):
s.complement()
class TestReverseComplement(unittest.TestCase):
def test_reverse_complement(self):
test_seqs_copy = copy.copy(test_seqs)
test_seqs_copy.pop(21)
for nucleotide_seq in test_seqs_copy:
if not isinstance(nucleotide_seq.alphabet,
Alphabet.ProteinAlphabet) and \
isinstance(nucleotide_seq, Seq.Seq):
expected = Seq.reverse_complement(nucleotide_seq)
self.assertEqual(
repr(expected), repr(nucleotide_seq.reverse_complement()))
self.assertEqual(
repr(expected[::-1]), repr(nucleotide_seq.complement()))
self.assertEqual(
str(nucleotide_seq.complement()),
str(Seq.reverse_complement(nucleotide_seq))[::-1])
self.assertEqual(str(nucleotide_seq.reverse_complement()),
str(Seq.reverse_complement(nucleotide_seq)))
def test_reverse_complement_of_mixed_dna_rna(self):
seq = "AUGAAACTG" # U and T
self.assertRaises(ValueError, Seq.reverse_complement, seq)
def test_reverse_complement_of_rna(self):
seq = "AUGAAACUG"
self.assertEqual("CAGUUUCAU", Seq.reverse_complement(seq))
def test_reverse_complement_of_dna(self):
seq = "ATGAAACTG"
self.assertEqual("CAGTTTCAT", Seq.reverse_complement(seq))
def test_reverse_complement_on_proteins(self):
"""Check reverse complement fails on a protein."""
for s in protein_seqs:
with self.assertRaises(ValueError):
Seq.reverse_complement(s)
with self.assertRaises(ValueError):
s.reverse_complement()
class TestDoubleReverseComplement(unittest.TestCase):
def test_reverse_complements(self):
"""Test double reverse complement preserves the sequence."""
sorted_amb_rna = sorted(ambiguous_rna_values)
sorted_amb_dna = sorted(ambiguous_dna_values)
for sequence in [Seq.Seq("".join(sorted_amb_rna)),
Seq.Seq("".join(sorted_amb_dna)),
Seq.Seq("".join(sorted_amb_rna),
Alphabet.generic_rna),
Seq.Seq("".join(sorted_amb_dna),
Alphabet.generic_dna),
Seq.Seq("".join(sorted_amb_rna).replace("X", ""),
IUPAC.IUPACAmbiguousRNA()),
Seq.Seq("".join(sorted_amb_dna).replace("X", ""),
IUPAC.IUPACAmbiguousDNA()),
Seq.Seq("AWGAARCKG")]: # Note no U or T
reversed_sequence = sequence.reverse_complement()
self.assertEqual(str(sequence),
str(reversed_sequence.reverse_complement()))
class TestSequenceAlphabets(unittest.TestCase):
def test_sequence_alphabets(self):
"""Sanity test on the test sequence alphabets.
See also enhancement bug 2597.
"""
for nucleotide_seq in test_seqs:
if "U" in str(nucleotide_seq).upper():
self.assertNotIsInstance(nucleotide_seq.alphabet,
Alphabet.DNAAlphabet)
if "T" in str(nucleotide_seq).upper():
self.assertNotIsInstance(nucleotide_seq.alphabet,
Alphabet.RNAAlphabet)
class TestTranscription(unittest.TestCase):
def test_transcription_dna_into_rna(self):
for nucleotide_seq in test_seqs:
if isinstance(nucleotide_seq.alphabet, Alphabet.DNAAlphabet):
expected = Seq.transcribe(nucleotide_seq)
self.assertEqual(
str(nucleotide_seq).replace("t", "u").replace("T", "U"),
str(expected))
def test_transcription_dna_string_into_rna(self):
seq = "ATGAAACTG"
self.assertEqual("AUGAAACUG", Seq.transcribe(seq))
def test_seq_object_transcription_method(self):
for nucleotide_seq in test_seqs:
if isinstance(nucleotide_seq.alphabet, Alphabet.DNAAlphabet) and \
isinstance(nucleotide_seq, Seq.Seq):
self.assertEqual(repr(Seq.transcribe(nucleotide_seq)),
repr(nucleotide_seq.transcribe()))
def test_transcription_of_rna(self):
"""Check transcription fails on RNA."""
seq = Seq.Seq("AUGAAACUG", IUPAC.ambiguous_rna)
with self.assertRaises(ValueError):
seq.transcribe()
def test_transcription_of_proteins(self):
"""Check transcription fails on a protein."""
for s in protein_seqs:
with self.assertRaises(ValueError):
Seq.transcribe(s)
if isinstance(s, Seq.Seq):
with self.assertRaises(ValueError):
s.transcribe()
def test_back_transcribe_rna_into_dna(self):
for nucleotide_seq in test_seqs:
if isinstance(nucleotide_seq.alphabet, Alphabet.RNAAlphabet):
expected = Seq.back_transcribe(nucleotide_seq)
self.assertEqual(
str(nucleotide_seq).replace("u", "t").replace("U", "T"),
str(expected))
def test_back_transcribe_rna_string_into_dna(self):
seq = "AUGAAACUG"
self.assertEqual("ATGAAACTG", Seq.back_transcribe(seq))
def test_seq_object_back_transcription_method(self):
for nucleotide_seq in test_seqs:
if isinstance(nucleotide_seq.alphabet, Alphabet.RNAAlphabet) and \
isinstance(nucleotide_seq, Seq.Seq):
expected = Seq.back_transcribe(nucleotide_seq)
self.assertEqual(repr(nucleotide_seq.back_transcribe()),
repr(expected))
def test_back_transcription_of_proteins(self):
"""Check back-transcription fails on a protein."""
for s in protein_seqs:
with self.assertRaises(ValueError):
Seq.back_transcribe(s)
if isinstance(s, Seq.Seq):
with self.assertRaises(ValueError):
s.back_transcribe()
def test_back_transcription_of_dna(self):
"""Check back-transcription fails on DNA."""
seq = Seq.Seq("ATGAAACTG", IUPAC.ambiguous_dna)
with self.assertRaises(ValueError):
seq.back_transcribe()
class TestTranslating(unittest.TestCase):
def setUp(self):
self.test_seqs = [
Seq.Seq("TCAAAAGGATGCATCATG", IUPAC.unambiguous_dna),
Seq.Seq("ATGAAACTG"),
Seq.Seq("ATGAARCTG"),
Seq.Seq("AWGAARCKG"), # Note no U or T
Seq.Seq("".join(ambiguous_rna_values)),
Seq.Seq("".join(ambiguous_dna_values)),
Seq.Seq("".join(ambiguous_rna_values), Alphabet.generic_rna),
Seq.Seq("".join(ambiguous_dna_values), Alphabet.generic_dna),
Seq.Seq("".join(ambiguous_rna_values), IUPAC.IUPACAmbiguousRNA()),
Seq.Seq("".join(ambiguous_dna_values), IUPAC.IUPACAmbiguousDNA()),
Seq.Seq("AWGAARCKG", Alphabet.generic_dna),
Seq.Seq("AUGAAACUG", Alphabet.generic_rna),
Seq.Seq("ATGAAACTG", IUPAC.unambiguous_dna),
Seq.Seq("ATGAAACTGWN", IUPAC.ambiguous_dna),
Seq.Seq("AUGAAACUG", Alphabet.generic_rna),
Seq.Seq("AUGAAACUG", IUPAC.unambiguous_rna),
Seq.Seq("AUGAAACUGWN", IUPAC.ambiguous_rna),
Seq.Seq("ATGAAACTG", Alphabet.generic_nucleotide),
Seq.MutableSeq("ATGAAACTG", Alphabet.generic_dna),
Seq.MutableSeq("AUGaaaCUG", IUPAC.unambiguous_rna),
]
def test_translation(self):
for nucleotide_seq in self.test_seqs:
nucleotide_seq = nucleotide_seq[:3 * (len(nucleotide_seq) // 3)]
if isinstance(nucleotide_seq, Seq.Seq) and \
"X" not in str(nucleotide_seq):
expected = Seq.translate(nucleotide_seq)
self.assertEqual(repr(expected),
repr(nucleotide_seq.translate()))
def test_alphabets_of_translated_seqs(self):
def triple_pad(s):
"""Add N to ensure length is a multiple of three (whole codons)."""
while len(s) % 3:
s += "N"
return s
self.assertEqual("IUPACProtein()",
repr(self.test_seqs[0].translate().alphabet))
self.assertEqual("ExtendedIUPACProtein()",
repr(self.test_seqs[1].translate().alphabet))
self.assertEqual("ExtendedIUPACProtein()",
repr(self.test_seqs[2].translate().alphabet))
self.assertEqual("ExtendedIUPACProtein()",
repr(self.test_seqs[3].translate().alphabet))
self.assertEqual("ExtendedIUPACProtein()",
repr(self.test_seqs[10].translate().alphabet))
self.assertEqual("ExtendedIUPACProtein()",
repr(self.test_seqs[11].translate().alphabet))
self.assertEqual("IUPACProtein()",
repr(self.test_seqs[12].translate().alphabet))
self.assertEqual(
"ExtendedIUPACProtein()",
repr(triple_pad(self.test_seqs[13]).translate().alphabet))
self.assertEqual("ExtendedIUPACProtein()",
repr(self.test_seqs[14].translate().alphabet))
self.assertEqual("IUPACProtein()",
repr(self.test_seqs[15].translate().alphabet))
self.assertEqual(
"ExtendedIUPACProtein()",
repr(triple_pad(self.test_seqs[16]).translate().alphabet))
self.assertEqual(
"ExtendedIUPACProtein()",
repr(triple_pad(self.test_seqs[17]).translate().alphabet))
def test_gapped_seq_with_gap_char_given(self):
seq = Seq.Seq("ATG---AAACTG")
self.assertEqual("M-KL", seq.translate(gap="-"))
self.assertRaises(TranslationError, seq.translate, gap="~")
def test_gapped_seq_with_stop_codon_and_gap_char_given(self):
seq = Seq.Seq("GTG---GCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG")
self.assertEqual("V-AIVMGR*KGAR*", seq.translate(gap="-"))
self.assertRaises(TranslationError, seq.translate)
def test_gapped_seq_with_gap_char_given_and_inferred_from_alphabet(self):
seq = Seq.Seq("ATG---AAACTG", Gapped(IUPAC.unambiguous_dna))
self.assertEqual("M-KL", seq.translate(gap="-"))
self.assertRaises(ValueError, seq.translate, gap="~")
seq = Seq.Seq("ATG~~~AAACTG", Gapped(IUPAC.unambiguous_dna))
self.assertRaises(ValueError, seq.translate, gap="~")
self.assertRaises(TranslationError, seq.translate, gap="-")
def test_gapped_seq_with_gap_char_given_and_inferred_from_alphabet2(self):
"""Test using stop codon in sequence."""
seq = Seq.Seq("ATG---AAACTGTAG", Gapped(IUPAC.unambiguous_dna))
self.assertEqual("M-KL*", seq.translate(gap="-"))
self.assertRaises(ValueError, seq.translate, gap="~")
seq = Seq.Seq("ATG---AAACTGTAG", Gapped(IUPAC.unambiguous_dna))
self.assertEqual("M-KL@", seq.translate(gap="-", stop_symbol="@"))
self.assertRaises(ValueError, seq.translate, gap="~")
seq = Seq.Seq("ATG~~~AAACTGTAG", Gapped(IUPAC.unambiguous_dna))
self.assertRaises(ValueError, seq.translate, gap="~")
self.assertRaises(TranslationError, seq.translate, gap="-")
def test_gapped_seq_no_gap_char_given(self):
seq = Seq.Seq("ATG---AAACTG")
self.assertRaises(TranslationError, seq.translate)
def test_gapped_seq_no_gap_char_given_and_inferred_from_alphabet(self):
seq = Seq.Seq("ATG---AAACTG", Gapped(IUPAC.unambiguous_dna))
self.assertEqual("M-KL", seq.translate())
seq = Seq.Seq("ATG~~~AAACTG", Gapped(IUPAC.unambiguous_dna))
self.assertRaises(TranslationError, seq.translate)
seq = Seq.Seq("ATG~~~AAACTG", Gapped(IUPAC.unambiguous_dna, "~"))
self.assertEqual("M~KL", seq.translate())
def test_alphabet_of_translated_gapped_seq(self):
seq = Seq.Seq("ATG---AAACTG", Gapped(IUPAC.unambiguous_dna))
self.assertEqual("Gapped(ExtendedIUPACProtein(), '-')",
repr(seq.translate().alphabet))
seq = Seq.Seq("ATG---AAACTG", Gapped(IUPAC.unambiguous_dna, "-"))
self.assertEqual("Gapped(ExtendedIUPACProtein(), '-')",
repr(seq.translate().alphabet))
seq = Seq.Seq("ATG~~~AAACTG", Gapped(IUPAC.unambiguous_dna, "~"))
self.assertEqual("Gapped(ExtendedIUPACProtein(), '~')",
repr(seq.translate().alphabet))
seq = Seq.Seq("ATG---AAACTG")
self.assertEqual("Gapped(ExtendedIUPACProtein(), '-')",
repr(seq.translate(gap="-").alphabet))
seq = Seq.Seq("ATG~~~AAACTG")
self.assertEqual("Gapped(ExtendedIUPACProtein(), '~')",
repr(seq.translate(gap="~").alphabet))
seq = Seq.Seq("ATG~~~AAACTGTAG")
self.assertEqual(
"HasStopCodon(Gapped(ExtendedIUPACProtein(), '~'), '*')",
repr(seq.translate(gap="~").alphabet))
seq = Seq.Seq("ATG---AAACTGTGA")
self.assertEqual(
"HasStopCodon(Gapped(ExtendedIUPACProtein(), '-'), '*')",
repr(seq.translate(gap="-").alphabet))
seq = Seq.Seq("ATG---AAACTGTGA")
self.assertEqual(
"HasStopCodon(Gapped(ExtendedIUPACProtein(), '-'), '@')",
repr(seq.translate(gap="-", stop_symbol="@").alphabet))
def test_translation_wrong_type(self):
"""Test translation table cannot be CodonTable."""
seq = Seq.Seq("ATCGTA")
with self.assertRaises(ValueError):
seq.translate(table=ambiguous_dna_complement)
def test_translation_of_string(self):
seq = "GTGGCCATTGTAATGGGCCGC"
self.assertEqual("VAIVMGR", Seq.translate(seq))
def test_translation_of_gapped_string_with_gap_char_given(self):
seq = "GTG---GCCATTGTAATGGGCCGC"
expected = "V-AIVMGR"
self.assertEqual(expected, Seq.translate(seq, gap="-"))
self.assertRaises(TypeError, Seq.translate, seq, gap=[])
self.assertRaises(ValueError, Seq.translate, seq, gap="-*")
def test_translation_of_gapped_string_no_gap_char_given(self):
seq = "GTG---GCCATTGTAATGGGCCGC"
self.assertRaises(TranslationError, Seq.translate, seq)
def test_translation_to_stop(self):
for nucleotide_seq in self.test_seqs:
nucleotide_seq = nucleotide_seq[:3 * (len(nucleotide_seq) // 3)]
if isinstance(nucleotide_seq, Seq.Seq) and \
"X" not in str(nucleotide_seq):
short = Seq.translate(nucleotide_seq, to_stop=True)
self.assertEqual(
str(short),
str(Seq.translate(nucleotide_seq).split("*")[0]))
seq = "GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG"
self.assertEqual("VAIVMGRWKGAR", Seq.translate(seq, table=2,
to_stop=True))
def test_translation_on_proteins(self):
"""Check translation fails on a protein."""
for s in protein_seqs:
with self.assertRaises(ValueError):
Seq.translate(s)
if isinstance(s, Seq.Seq):
with self.assertRaises(ValueError):
s.translate()
def test_translation_of_invalid_codon(self):
for codon in ["TA?", "N-N", "AC_", "Ac_"]:
with self.assertRaises(TranslationError):
Seq.translate(codon)
def test_translation_of_glutamine(self):
for codon in ["SAR", "SAG", "SAA"]:
self.assertEqual("Z", Seq.translate(codon))
def test_translation_of_asparagine(self):
for codon in ["RAY", "RAT", "RAC"]:
self.assertEqual("B", Seq.translate(codon))
def test_translation_of_leucine(self):
for codon in ["WTA", "MTY", "MTT", "MTW", "MTM", "MTH", "MTA", "MTC",
"HTA"]:
self.assertEqual("J", Seq.translate(codon))
def test_translation_with_bad_table_argument(self):
table = {}
with self.assertRaises(ValueError):
Seq.translate("GTGGCCATTGTAATGGGCCGC", table=table)
def test_translation_with_codon_table_as_table_argument(self):
table = standard_dna_table
self.assertEqual("VAIVMGR", Seq.translate("GTGGCCATTGTAATGGGCCGC",
table=table))
def test_translation_incomplete_codon(self):
with self.assertWarns(BiopythonWarning):
Seq.translate("GTGGCCATTGTAATGGGCCG")
def test_translation_extra_stop_codon(self):
seq = "GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAGTAG"
with self.assertRaises(TranslationError):
Seq.translate(seq, table=2, cds=True)
def test_translation_using_cds(self):
seq = "GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG"
self.assertEqual("MAIVMGRWKGAR", Seq.translate(seq, table=2, cds=True))
seq = "GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCG" # not multiple of three
with self.assertRaises(TranslationError):
Seq.translate(seq, table=2, cds=True)
seq = "GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGA" # no stop codon
with self.assertRaises(TranslationError):
Seq.translate(seq, table=2, cds=True)
seq = "GCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG" # no start codon
with self.assertRaises(TranslationError):
Seq.translate(seq, table=2, cds=True)
def test_translation_using_tables_with_ambiguous_stop_codons(self):
"""Check for error and warning messages.
Here, 'ambiguous stop codons' means codons of unambiguous sequence
but with a context sensitive encoding as STOP or an amino acid.
Thus, these codons appear within the codon table in the forward
table as well as in the list of stop codons.
"""
seq = "ATGGGCTGA"
with self.assertRaises(ValueError):
Seq.translate(seq, table=28, to_stop=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
Seq.translate(seq, table=28)
message = str(w[-1].message)
self.assertTrue(message.startswith("This table contains"))
self.assertTrue(message.endswith("be translated as amino acid."))
class TestStopCodons(unittest.TestCase):
def setUp(self):
self.misc_stops = "TAATAGTGAAGAAGG"
def test_stops(self):
for nucleotide_seq in [self.misc_stops, Seq.Seq(self.misc_stops),
Seq.Seq(self.misc_stops,
Alphabet.generic_nucleotide),
Seq.Seq(self.misc_stops,
Alphabet.DNAAlphabet()),
Seq.Seq(self.misc_stops,
IUPAC.unambiguous_dna)]:
self.assertEqual("***RR", str(Seq.translate(nucleotide_seq)))
self.assertEqual("***RR", str(Seq.translate(nucleotide_seq,
table=1)))
self.assertEqual("***RR", str(Seq.translate(nucleotide_seq,
table="SGC0")))
self.assertEqual("**W**", str(Seq.translate(nucleotide_seq,
table=2)))
self.assertEqual("**WRR", str(Seq.translate(nucleotide_seq,
table="Yeast Mitochondrial")))
self.assertEqual("**WSS", str(Seq.translate(nucleotide_seq,
table=5)))
self.assertEqual("**WSS", str(Seq.translate(nucleotide_seq,
table=9)))
self.assertEqual("**CRR", str(Seq.translate(nucleotide_seq,
table="Euplotid Nuclear")))
self.assertEqual("***RR", str(Seq.translate(nucleotide_seq,
table=11)))
self.assertEqual("***RR", str(Seq.translate(nucleotide_seq,
table="Bacterial")))
def test_translation_of_stops(self):
self.assertEqual(Seq.translate("TAT"), "Y")
self.assertEqual(Seq.translate("TAR"), "*")
self.assertEqual(Seq.translate("TAN"), "X")
self.assertEqual(Seq.translate("NNN"), "X")
self.assertEqual(Seq.translate("TAt"), "Y")
self.assertEqual(Seq.translate("TaR"), "*")
self.assertEqual(Seq.translate("TaN"), "X")
self.assertEqual(Seq.translate("nnN"), "X")
self.assertEqual(Seq.translate("tat"), "Y")
self.assertEqual(Seq.translate("tar"), "*")
self.assertEqual(Seq.translate("tan"), "X")
self.assertEqual(Seq.translate("nnn"), "X")
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| python | 62,433 |
import numpy as np
import catboost as cb
import vaex.ml.catboost
import vaex.ml.datasets
# the parameters of the model
params_multiclass = {
'leaf_estimation_method': 'Gradient',
'learning_rate': 0.1,
'max_depth': 3,
'bootstrap_type': 'Bernoulli',
'subsample': 0.8,
'sampling_frequency': 'PerTree',
'colsample_bylevel': 0.8,
'reg_lambda': 1,
'objective': 'MultiClass',
'eval_metric': 'MultiClass',
'random_state': 42,
'verbose': 0,
}
# the parameters of the regression model
params_reg = {
'leaf_estimation_method': 'Gradient',
'learning_rate': 0.1,
'max_depth': 3,
'bootstrap_type': 'Bernoulli',
'subsample': 0.8,
'sampling_frequency': 'PerTree',
'colsample_bylevel': 0.8,
'reg_lambda': 1,
'objective': 'MAE',
'eval_metric': 'R2',
'random_state': 42,
'verbose': 0,
}
def test_catboost():
ds = vaex.ml.datasets.load_iris()
ds_train, ds_test = ds.ml.train_test_split(test_size=0.2, verbose=False)
features = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
booster = vaex.ml.catboost.CatBoostModel(num_boost_round=10,
params=params_multiclass,
features=features,
target='class_',
prediction_type='Probability')
# Predict in memory
booster.fit(ds_train)
class_predict = booster.predict(ds_test)
assert np.all(ds_test.col.class_.values == np.argmax(class_predict, axis=1))
# Transform
ds_train = booster.transform(ds_train) # this will add the catboost_prediction column
state = ds_train.state_get()
ds_test.state_set(state)
assert np.all(ds_test.col.class_.values == np.argmax(ds_test.catboost_prediction.values, axis=1))
def test_catboost_numerical_validation():
ds = vaex.ml.datasets.load_iris()
features = ['sepal_width', 'petal_length', 'sepal_length', 'petal_width']
# Vanilla catboost
dtrain = cb.Pool(ds[features].values, label=ds.data.class_)
cb_bst = cb.train(params=params_multiclass, dtrain=dtrain, num_boost_round=3)
cb_pred = cb_bst.predict(dtrain, prediction_type='Probability')
# catboost through vaex
booster = vaex.ml.catboost.CatBoostModel(features=features, target='class_', params=params_multiclass, num_boost_round=3)
booster.fit(ds)
vaex_pred = booster.predict(ds)
    # Compare the predictions of catboost vs vaex.ml
np.testing.assert_equal(vaex_pred, cb_pred, verbose=True,
err_msg='The predictions of vaex.ml.catboost do not match those of pure catboost')
def test_catboost_serialize(tmpdir):
ds = vaex.ml.datasets.load_iris()
features = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
target = 'class_'
gbm = ds.ml.catboost_model(target, features=features, num_boost_round=20, params=params_multiclass)
pl = vaex.ml.Pipeline([gbm])
pl.save(str(tmpdir.join('test.json')))
pl.load(str(tmpdir.join('test.json')))
gbm = ds.ml.catboost_model(target, features=features, num_boost_round=20, params=params_multiclass)
gbm.state_set(gbm.state_get())
pl = vaex.ml.Pipeline([gbm])
pl.save(str(tmpdir.join('test.json')))
pl.load(str(tmpdir.join('test.json')))
def test_catboost_validation_set():
# read data
ds = vaex.example()
# Train and test split
train, test = ds.ml.train_test_split(verbose=False)
    # Define the training features
features = ['vx', 'vy', 'vz', 'Lz', 'L']
# instantiate the booster model
booster = vaex.ml.catboost.CatBoostModel(features=features, target='E', num_boost_round=10, params=params_reg)
# fit the booster - including saving the history of the validation sets
booster.fit(train, evals=[train, test])
assert hasattr(booster, 'booster')
assert len(booster.booster.evals_result_['learn']['MAE']) == 10
assert len(booster.booster.evals_result_['learn']['R2']) == 10
assert len(booster.booster.evals_result_['validation_0']['MAE']) == 10
assert len(booster.booster.evals_result_['validation_0']['R2']) == 10
assert hasattr(booster.booster, 'best_iteration_')
assert booster.booster.best_iteration_ is not None
def test_catboost_pipeline():
# read data
ds = vaex.example()
    # train test split
train, test = ds.ml.train_test_split(verbose=False)
# add virtual columns
train['r'] = np.sqrt(train.x**2 + train.y**2 + train.z**2)
# Do a pca
features = ['vx', 'vy', 'vz', 'Lz', 'L']
pca = train.ml.pca(n_components=3, features=features)
train = pca.transform(train)
# Do state transfer
st = train.ml.state_transfer()
# now the catboost model thingy
features = ['r', 'PCA_0', 'PCA_1', 'PCA_2']
# define the boosting model
booster = train.ml.catboost_model(target='E', num_boost_round=10, features=features, params=params_reg)
# Create a pipeline
pp = vaex.ml.Pipeline([st, booster])
# Use the pipeline
pred = pp.predict(test) # This works
trans = pp.transform(test) # This will crash (softly)
# trans.evaluate('catboost_prediction') # This is where the problem happens
np.testing.assert_equal(pred,
trans.evaluate('catboost_prediction'),
verbose=True,
err_msg='The predictions from the predict and transform method do not match')
| python | 5,518 |
# -*- coding:utf-8 -*-
import sys
import ssl
# sys.path.insert(0,'../../')
from .CCPRestSDK import REST
ssl._create_default_https_context = ssl._create_unverified_context  # globally disable certificate verification
# Note: main account. After logging in to the Yuntongxun website, the developer main account's ACCOUNT SID is shown under "Console - Applications".
_accountSid = '8aaf07087172a6ee01719ba430a6174f'
# Note: main account token. After logging in to the Yuntongxun website, the developer main account's AUTH TOKEN is shown under "Console - Applications".
_accountToken = 'ee7879df5ee84a579ab16bfeb87c3105'
# Use the APPID shown on the management console home page, or the APPID of an application you created yourself
_appId = '8aaf07087172a6ee01719ba4311f1756'
# Note: request address; for the production environment set this to app.cloopen.com
# _serverIP = 'sandboxapp.cloopen.com'
_serverIP = 'app.cloopen.com'
# Note: request port; 8883 for the production environment
_serverPort = "8883"
# Note: the REST API version number stays unchanged
_softVersion = '2013-12-26'
# Official SMS-sending sample code provided by Yuntongxun
# # Send a template SMS
# # @param to      phone number
# # @param datas   content data, given as an array, e.g. {'12','34'}; pass '' if no substitution is needed
# # @param $tempId template id
#
# def sendTemplateSMS(to, datas, tempId):
# # Initialize the REST SDK
# rest = REST(serverIP, serverPort, softVersion)
# rest.setAccount(accountSid, accountToken)
# rest.setAppId(appId)
#
# result = rest.sendTemplateSMS(to, datas, tempId)
# for k, v in result.iteritems():
#
# if k == 'templateSMS':
# for k, s in v.iteritems():
# print '%s:%s' % (k, s)
# else:
# print '%s:%s' % (k, v)
class CCP(object):
"""发送短信的辅助类"""
def __new__(cls, *args, **kwargs):
        # Check whether the class attribute _instance exists; _instance is the one and only CCP object, i.e. the singleton
if not hasattr(CCP, "_instance"):
cls._instance = super(CCP, cls).__new__(cls, *args, **kwargs)
cls._instance.rest = REST(_serverIP, _serverPort, _softVersion)
cls._instance.rest.setAccount(_accountSid, _accountToken)
cls._instance.rest.setAppId(_appId)
return cls._instance
def send_template_sms(self, to, datas, temp_id):
"""发送模板短信"""
# @param to 手机号码
# @param datas 内容数据 格式为数组 例如:{'12','34'},如不需替换请填 ''
# @param temp_id 模板Id
result = self.rest.sendTemplateSMS(to, datas, temp_id)
# a = result.get("statusCode")
# print(a)
        # If Yuntongxun sent the SMS successfully, the statusCode field of the returned result dict is "000000"
if result.get("statusCode") == "000000":
            # return 0 to indicate the SMS was sent successfully
return 0
else:
            # return -1 to indicate that sending failed
return -1
# if __name__ == '__main__':
# ccp = CCP()
# # Note: the SMS template id used for testing is 1
# ccp.send_template_sms('13690599000', ['666', 5], 1)
| python | 2,377
import tensorflow as tf
def compute_area(top_left, bot_right):
""" Compute area given top_left and bottom_right coordinates
Args:
top_left: tensor (num_boxes, 2)
bot_right: tensor (num_boxes, 2)
Returns:
area: tensor (num_boxes,)
"""
# top_left: N x 2
# bot_right: N x 2
hw = tf.clip_by_value(bot_right - top_left, 0.0, 512.0)
area = hw[..., 0] * hw[..., 1]
return area
def compute_iou(boxes_a, boxes_b):
""" Compute overlap between boxes_a and boxes_b
Args:
boxes_a: tensor (num_boxes_a, 4)
boxes_b: tensor (num_boxes_b, 4)
Returns:
overlap: tensor (num_boxes_a, num_boxes_b)
"""
# boxes_a => num_boxes_a, 1, 4
boxes_a = tf.expand_dims(boxes_a, 1)
# boxes_b => 1, num_boxes_b, 4
boxes_b = tf.expand_dims(boxes_b, 0)
top_left = tf.math.maximum(boxes_a[..., :2], boxes_b[..., :2])
bot_right = tf.math.minimum(boxes_a[..., 2:], boxes_b[..., 2:])
overlap_area = compute_area(top_left, bot_right)
area_a = compute_area(boxes_a[..., :2], boxes_a[..., 2:])
area_b = compute_area(boxes_b[..., :2], boxes_b[..., 2:])
overlap = overlap_area / (area_a + area_b - overlap_area)
return overlap
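# Small illustrative check (not part of the original utilities): two unit
# squares offset by half a side intersect in an area of 0.25, so their
# IoU is 0.25 / (1 + 1 - 0.25) ≈ 0.143.
def _example_compute_iou():
    boxes_a = tf.constant([[0.0, 0.0, 1.0, 1.0]])
    boxes_b = tf.constant([[0.5, 0.5, 1.5, 1.5]])
    return compute_iou(boxes_a, boxes_b)  # tensor of shape (1, 1), value ≈ 0.1429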
def compute_target(default_boxes, gt_boxes, gt_labels, iou_threshold=0.5):
""" Compute regression and classification targets
Args:
default_boxes: tensor (num_default, 4)
of format (cx, cy, w, h)
gt_boxes: tensor (num_gt, 4)
of format (xmin, ymin, xmax, ymax)
gt_labels: tensor (num_gt,)
Returns:
gt_confs: classification targets, tensor (num_default,)
gt_locs: regression targets, tensor (num_default, 4)
"""
# Convert default boxes to format (xmin, ymin, xmax, ymax)
# in order to compute overlap with gt boxes
transformed_default_boxes = transform_center_to_corner(default_boxes)
iou = compute_iou(transformed_default_boxes, gt_boxes)
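    # Matching strategy: each default box is assigned the ground-truth box it
    # overlaps most, and each ground-truth box is additionally forced onto its
    # single best default box below, so no ground-truth box goes unmatched.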
best_gt_iou = tf.math.reduce_max(iou, 1)
best_gt_idx = tf.math.argmax(iou, 1)
best_default_iou = tf.math.reduce_max(iou, 0)
best_default_idx = tf.math.argmax(iou, 0)
best_gt_idx = tf.tensor_scatter_nd_update(
best_gt_idx,
tf.expand_dims(best_default_idx, 1),
tf.range(best_default_idx.shape[0], dtype=tf.int64))
# Normal way: use a for loop
# for gt_idx, default_idx in enumerate(best_default_idx):
# best_gt_idx = tf.tensor_scatter_nd_update(
# best_gt_idx,
# tf.expand_dims([default_idx], 1),
# [gt_idx])
best_gt_iou = tf.tensor_scatter_nd_update(
best_gt_iou,
tf.expand_dims(best_default_idx, 1),
tf.ones_like(best_default_idx, dtype=tf.float32))
gt_confs = tf.gather(gt_labels, best_gt_idx)
gt_confs = tf.where(
tf.less(best_gt_iou, iou_threshold),
tf.zeros_like(gt_confs),
gt_confs)
gt_boxes = tf.gather(gt_boxes, best_gt_idx)
gt_locs = encode(default_boxes, gt_boxes)
return gt_confs, gt_locs
def encode(default_boxes, boxes, variance=[0.1, 0.2]):
""" Compute regression values
Args:
default_boxes: tensor (num_default, 4)
of format (cx, cy, w, h)
boxes: tensor (num_default, 4)
of format (xmin, ymin, xmax, ymax)
variance: variance for center point and size
Returns:
locs: regression values, tensor (num_default, 4)
"""
# Convert boxes to (cx, cy, w, h) format
transformed_boxes = transform_corner_to_center(boxes)
locs = tf.concat([
(transformed_boxes[..., :2] - default_boxes[:, :2]
) / (default_boxes[:, 2:] * variance[0]),
tf.math.log(transformed_boxes[..., 2:] / default_boxes[:, 2:]) / variance[1]],
axis=-1)
return locs
def decode(default_boxes, locs, variance=[0.1, 0.2]):
""" Decode regression values back to coordinates
Args:
default_boxes: tensor (num_default, 4)
of format (cx, cy, w, h)
locs: tensor (batch_size, num_default, 4)
of format (cx, cy, w, h)
variance: variance for center point and size
Returns:
boxes: tensor (num_default, 4)
of format (xmin, ymin, xmax, ymax)
"""
locs = tf.concat([
locs[..., :2] * variance[0] *
default_boxes[:, 2:] + default_boxes[:, :2],
tf.math.exp(locs[..., 2:] * variance[1]) * default_boxes[:, 2:]], axis=-1)
boxes = transform_center_to_corner(locs)
return boxes
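# Note (added for clarity): for a fixed set of default boxes and variances, encode()
# and decode() are inverse transforms, i.e. decode(d, encode(d, b)) ≈ b up to
# floating-point error; the sketch at the end of this module checks exactly that.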
def transform_corner_to_center(boxes):
""" Transform boxes of format (xmin, ymin, xmax, ymax)
to format (cx, cy, w, h)
Args:
boxes: tensor (num_boxes, 4)
of format (xmin, ymin, xmax, ymax)
Returns:
boxes: tensor (num_boxes, 4)
of format (cx, cy, w, h)
"""
center_box = tf.concat([
(boxes[..., :2] + boxes[..., 2:]) / 2,
boxes[..., 2:] - boxes[..., :2]], axis=-1)
return center_box
def transform_center_to_corner(boxes):
""" Transform boxes of format (cx, cy, w, h)
to format (xmin, ymin, xmax, ymax)
Args:
boxes: tensor (num_boxes, 4)
of format (cx, cy, w, h)
Returns:
boxes: tensor (num_boxes, 4)
of format (xmin, ymin, xmax, ymax)
"""
corner_box = tf.concat([
boxes[..., :2] - boxes[..., 2:] / 2,
boxes[..., :2] + boxes[..., 2:] / 2], axis=-1)
return corner_box
def compute_nms(boxes, scores, nms_threshold, limit=200):
""" Perform Non Maximum Suppression algorithm
to eliminate boxes with high overlap
Args:
boxes: tensor (num_boxes, 4)
of format (xmin, ymin, xmax, ymax)
scores: tensor (num_boxes,)
nms_threshold: NMS threshold
limit: maximum number of boxes to keep
Returns:
idx: indices of kept boxes
"""
if boxes.shape[0] == 0:
return tf.constant([], dtype=tf.int32)
selected = [0]
idx = tf.argsort(scores, direction='DESCENDING')
idx = idx[:limit]
boxes = tf.gather(boxes, idx)
iou = compute_iou(boxes, boxes)
while True:
row = iou[selected[-1]]
next_indices = row <= nms_threshold
# iou[:, ~next_indices] = 1.0
iou = tf.where(
tf.expand_dims(tf.math.logical_not(next_indices), 0),
tf.ones_like(iou, dtype=tf.float32),
iou)
if not tf.math.reduce_any(next_indices):
break
selected.append(tf.argsort(
tf.dtypes.cast(next_indices, tf.int32), direction='DESCENDING')[0].numpy())
return tf.gather(idx, selected)
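# Minimal self-check (an illustrative sketch added to this module; the toy boxes and
# scores below are assumptions, not fixtures from the original project). Running the
# file directly exercises the IoU, encode/decode and NMS helpers.
if __name__ == '__main__':
    boxes = tf.constant([[0.0, 0.0, 10.0, 10.0],
                         [1.0, 1.0, 11.0, 11.0],
                         [20.0, 20.0, 30.0, 30.0]])
    print('IoU matrix:\n', compute_iou(boxes, boxes).numpy())

    # encode()/decode() round trip against hand-made default boxes in (cx, cy, w, h) format.
    default_boxes = tf.constant([[5.0, 5.0, 10.0, 10.0],
                                 [6.0, 6.0, 10.0, 10.0],
                                 [25.0, 25.0, 10.0, 10.0]])
    locs = encode(default_boxes, boxes)
    print('round-trip max error:',
          tf.reduce_max(tf.abs(decode(default_boxes, locs) - boxes)).numpy())

    # With a 0.5 threshold, box 1 (IoU ≈ 0.68 with the higher-scoring box 0) is suppressed.
    scores = tf.constant([0.9, 0.8, 0.7])
    print('kept indices:', compute_nms(boxes, scores, nms_threshold=0.5).numpy())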
"""
Django settings for iFarm project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x^)pub)%&@t-^y@-481iy_mrz-sjc@xxl)_44aty@^b3cjb5d2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'iFarmapp.apps.IfarmappConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'iFarm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'iFarm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Generated by Django 3.2.4 on 2021-08-03 16:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bidding', '0006_bidhandshakecycle'),
]
operations = [
migrations.AddField(
model_name='bidhandshake',
name='bid_cycle_id',
field=models.IntegerField(help_text='The bid cycle ID', null=True),
),
]
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
""" Gateware for working with abstract endpoints. """
import functools
import operator
from amaranth import Signal, Elaboratable, Module
from amaranth.hdl.ast import Past
from .packet import DataCRCInterface, InterpacketTimerInterface, TokenDetectorInterface
from .packet import HandshakeExchangeInterface
from ..stream import USBInStreamInterface, USBOutStreamInterface
from ...utils.bus import OneHotMultiplexer
class EndpointInterface:
""" Interface that connects a USB endpoint module to a USB device.
Many non-control endpoints won't need to use the latter half of this structure;
it will be automatically removed by the relevant synthesis tool.
Attributes
----------
tokenizer: TokenDetectorInterface, to detector
Interface to our TokenDetector; notifies us of USB tokens.
rx: USBOutStreamInterface, input stream to endpoint
Receive interface for this endpoint.
rx_complete: Signal(), input to endpoint
Strobe that indicates that the concluding rx-stream was valid (CRC check passed).
rx_ready_for_response: Signal(), input to endpoint
Strobe that indicates that we're ready to respond to a complete transmission.
Indicates that an interpacket delay has passed after an `rx_complete` strobe.
rx_invalid: Signal(), input to endpoint
Strobe that indicates that the concluding rx-stream was invalid (CRC check failed).
rx_pid_toggle: Signal(), input to endpoint
Value for the data PID toggle; 0 indicates we're receiving a DATA0; 1 indicates a DATA1.
tx: USBInStreamInterface, output stream from endpoint
Transmit interface for this endpoint.
tx_pid_toggle: Signal(2), output from endpoint
Value for the data PID toggle; 0 indicates we'll send DATA0; 1 indicates DATA1.
2 indicates we'll send DATA2, while 3 indicates we'll send MDATA.
handshakes_in: HandshakeExchangeInterface, input to endpoint
Carries handshakes detected from the host.
handshakes_out: HandshakeExchangeInterface, output from endpoint
Carries handshakes generate by this endpoint.
speed: Signal(2), input to endpoint
The device's current operating speed. Should be a USBSpeed enumeration value --
0 for high, 1 for full, 2 for low.
active_address: Signal(7), input to endpoint
Contains the device's current address.
address_changed: Signal(), output from endpoint.
Strobe; pulses high when the device's address should be changed.
new_address: Signal(7), output from endpoint
When :attr:`address_changed` is high, this field contains the address that should be adopted.
active_config: Signal(8), input to endpoint
The configuration number of the active configuration.
config_changed: Signal(), output from endpoint
Strobe; pulses high when the device's configuration should be changed.
new_config: Signal(8)
When `config_changed` is high, this field contains the configuration that should be applied.
timer: InterpacketTimerInterface
Interface to our interpacket timer.
data_crc: DataCRCInterface
Control connection for our data-CRC unit.
"""
def __init__(self):
self.data_crc = DataCRCInterface()
self.tokenizer = TokenDetectorInterface()
self.timer = InterpacketTimerInterface()
self.speed = Signal(2)
self.active_address = Signal(7)
self.address_changed = Signal()
self.new_address = Signal(7)
self.active_config = Signal(8)
self.config_changed = Signal()
self.new_config = Signal(8)
self.rx = USBOutStreamInterface()
self.rx_complete = Signal()
self.rx_ready_for_response = Signal()
self.rx_invalid = Signal()
self.rx_pid_toggle = Signal(2)
self.tx = USBInStreamInterface()
self.tx_pid_toggle = Signal(2)
self.handshakes_in = HandshakeExchangeInterface(is_detector=True)
self.handshakes_out = HandshakeExchangeInterface(is_detector=False)
self.issue_stall = Signal()
class USBEndpointMultiplexer(Elaboratable):
""" Multiplexes access to the resources shared between multiple endpoint interfaces.
Interfaces are added using :attr:`add_interface`.
Attributes
----------
shared: EndpointInterface
The post-multiplexer endpoint interface.
"""
def __init__(self):
#
# I/O port
#
self.shared = EndpointInterface()
#
# Internals
#
self._interfaces = []
def add_interface(self, interface: EndpointInterface):
""" Adds a EndpointInterface to the multiplexer.
Arbitration is not performed; it's expected only one endpoint will be
driving the transmit lines at a time.
"""
self._interfaces.append(interface)
def _multiplex_signals(self, m, *, when, multiplex, sub_bus=None):
""" Helper that creates a simple priority-encoder multiplexer.
Parameters
----------
when: str
The name of the interface signal that indicates that the `multiplex` signals should be
selected for output. If this signal should itself be multiplexed, include it in `multiplex`.
multiplex: iterable(str)
The names of the interface signals to be multiplexed.
"""
def get_signal(interface, name):
""" Fetches an interface signal by name / sub_bus. """
if sub_bus:
bus = getattr(interface, sub_bus)
return getattr(bus, name)
else:
return getattr(interface, name)
# We're building an if-elif tree; so we should start with an If entry.
conditional = m.If
for interface in self._interfaces:
condition = get_signal(interface, when)
with conditional(condition):
# Connect up each of our signals.
for signal_name in multiplex:
# Get the actual signals for our input and output...
driving_signal = get_signal(interface, signal_name)
target_signal = get_signal(self.shared, signal_name)
# ... and connect them.
m.d.comb += target_signal .eq(driving_signal)
# After the first element, all other entries should be created with Elif.
conditional = m.Elif
def or_join_interface_signals(self, m, signal_for_interface):
""" Joins together a set of signals on each interface by OR'ing the signals together. """
# Find the value of all of our pre-mux signals OR'd together...
all_signals = (signal_for_interface(i) for i in self._interfaces)
or_value = functools.reduce(operator.__or__, all_signals, 0)
# ... and tie it to our post-mux signal.
m.d.comb += signal_for_interface(self.shared).eq(or_value)
def elaborate(self, platform):
m = Module()
shared = self.shared
#
# Pass through signals being routed -to- our pre-mux interfaces.
#
for interface in self._interfaces:
m.d.comb += [
# CRC and timer shared signals interface.
interface.data_crc.crc .eq(shared.data_crc.crc),
interface.timer.tx_allowed .eq(shared.timer.tx_allowed),
interface.timer.tx_timeout .eq(shared.timer.tx_timeout),
interface.timer.rx_timeout .eq(shared.timer.rx_timeout),
# Detectors.
shared.handshakes_in .connect(interface.handshakes_in),
shared.tokenizer .connect(interface.tokenizer),
# Rx interface.
shared.rx .connect(interface.rx),
interface.rx_complete .eq(shared.rx_complete),
interface.rx_ready_for_response .eq(shared.rx_ready_for_response),
interface.rx_invalid .eq(shared.rx_invalid),
interface.rx_pid_toggle .eq(shared.rx_pid_toggle),
# State signals.
interface.speed .eq(shared.speed),
interface.active_config .eq(shared.active_config),
interface.active_address .eq(shared.active_address)
]
#
# Multiplex the signals being routed -from- our pre-mux interface.
#
self._multiplex_signals(m,
when='address_changed',
multiplex=['address_changed', 'new_address']
)
self._multiplex_signals(m,
when='config_changed',
multiplex=['config_changed', 'new_config']
)
# Connect up our transmit interface.
m.submodules.tx_mux = tx_mux = OneHotMultiplexer(
interface_type=USBInStreamInterface,
mux_signals=('payload',),
or_signals=('valid', 'first', 'last'),
pass_signals=('ready',)
)
tx_mux.add_interfaces(i.tx for i in self._interfaces)
m.d.comb += self.shared.tx.stream_eq(tx_mux.output)
# OR together all of our handshake-generation requests...
self.or_join_interface_signals(m, lambda interface : interface.handshakes_out.ack)
self.or_join_interface_signals(m, lambda interface : interface.handshakes_out.nak)
self.or_join_interface_signals(m, lambda interface : interface.handshakes_out.stall)
# ... our CRC start signals...
self.or_join_interface_signals(m, lambda interface : interface.data_crc.start)
# ... and our timer start signals.
self.or_join_interface_signals(m, lambda interface : interface.timer.start)
# Finally, connect up our transmit PID select.
conditional = m.If
# We'll connect our PID toggle to whichever interface has a valid transmission going.
for interface in self._interfaces:
with conditional(interface.tx.valid | Past(interface.tx.valid, domain="usb")):
m.d.comb += shared.tx_pid_toggle.eq(interface.tx_pid_toggle)
conditional = m.Elif
return m
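# Illustrative usage sketch (added; not part of LUNA itself, and the names below are
# placeholders): a device typically creates one EndpointInterface per endpoint
# implementation, registers each with the multiplexer, and then wires `mux.shared`
# to its shared token detector, data-CRC unit and interpacket timers:
#
#     mux = USBEndpointMultiplexer()
#     mux.add_interface(endpoint_a_interface)   # an EndpointInterface
#     mux.add_interface(endpoint_b_interface)   # another EndpointInterface
#     m.submodules.endpoint_mux = mux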
async def application(scope, receive, send):
assert scope['type'] == 'http'
await send(
{
'type': 'http.response.start',
'status': 204,
'headers': [],
}
)
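# Notes (added): most ASGI servers expect the response to be completed with an
# 'http.response.body' event -- an empty body is fine for a 204:
#
#     await send({'type': 'http.response.body', 'body': b''})
#
# To serve this app locally (assuming the `uvicorn` package is installed and this
# file is saved as app.py):  uvicorn app:application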
"""
Mask R-CNN
Base Configurations class.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import math
import numpy as np
import os
from os.path import join
import torch
from maskr.datagen.anchors import generate_pyramid_anchors
import logging
log = logging.getLogger()
# Base Configuration Class
# Don't use this class directly. Instead, sub-class it and override
# the configurations you need to change.
class Config(object):
"""Base configuration class. For custom configurations, create a
sub-class that inherits from this one and override properties
that need to be changed.
"""
##### datagen ##################################################################
# Number of classification classes (including background)
NUM_CLASSES = 1 # Override in sub-classes
# If enabled, resizes instance masks to a smaller size to reduce
# memory load. Recommended when using high-resolution images.
USE_MINI_MASK = True
MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask
# Images are resized to be >= min and <= max; if both cannot be satisfied, the max is enforced.
IMAGE_SHAPE = [1024, 1024]
# If True, pad images with zeros such that they're (max_dim by max_dim)
IMAGE_PADDING = True # currently, the False option is not supported
# Image mean (RGB)
MEAN_PIXEL = [123.7, 116.8, 103.9]
# Maximum number of ground truth instances to use in one image
MAX_GT_INSTANCES = 100
WORKERS = os.cpu_count()
BATCH_SIZE = 1
AUGMENT = False
SHUFFLE = True
####### training ##################################################################
# names of weight files
IMAGENET_MODEL_WEIGHTS = "resnet50_imagenet.pth"
COCO_MODEL_WEIGHTS = "mask_rcnn_coco.pth"
# NUMBER OF GPUs to use. For CPU use 0
GPU_COUNT = torch.cuda.device_count()
# Learning rate and momentum
# The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes
# weights to explode. Likely due to differences in optimizer
# implementation.
LEARNING_RATE = 0.001
LEARNING_MOMENTUM = 0.9
# Weight decay regularization
WEIGHT_DECAY = 0.0001
#### calculated
ANCHORS = None
BACKBONE_SHAPES = None
DEVICE = "cpu"
WEIGHTS = None
######### backbone ################################################################
# The strides of each layer of the FPN Pyramid. These values
# are based on a Resnet101 backbone.
BACKBONE_STRIDES = [4, 8, 16, 32, 64]
########## RPN ################################################################
# Length of square anchor side in pixels
RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
# Ratios of anchors at each cell (width/height)
# A value of 1 represents a square anchor, and 0.5 is a wide anchor
RPN_ANCHOR_RATIOS = [0.5, 1, 2]
# Anchor stride
# If 1 then anchors are created for each cell in the backbone feature map.
# If 2, then anchors are created for every other cell, and so on.
RPN_ANCHOR_STRIDE = 1
# Non-max suppression threshold to filter RPN proposals.
# You can reduce this during training to generate more proposals.
RPN_NMS_THRESHOLD = 0.7
# How many anchors per image to use for RPN training
RPN_TRAIN_ANCHORS_PER_IMAGE = 256
# ROIs kept after non-maximum suppression (training and inference)
POST_NMS_ROIS_TRAINING = 2000
POST_NMS_ROIS_INFERENCE = 1000
# Number of ROIs per image to feed to classifier/mask heads
# The Mask RCNN paper uses 512 but often the RPN doesn't generate
# enough positive proposals to fill this and keep a positive:negative
# ratio of 1:3. You can increase the number of proposals by adjusting
# the RPN NMS threshold.
TRAIN_ROIS_PER_IMAGE = 200
# Percent of positive ROIs used to train classifier/mask heads
ROI_POSITIVE_RATIO = 0.33
# Bounding box refinement standard deviation for RPN
RPN_BBOX_STD_DEV = [0.1, 0.1, 0.2, 0.2]
##### roialign ###########################################################################
# Pooled ROIs
POOL_SIZE = 7
MASK_POOL_SIZE = 14
MASK_SHAPE = [28, 28]
#### detection #########################################################################
# for final detections
BBOX_STD_DEV = [0.1, 0.1, 0.2, 0.2]
# Max number of final detections
DETECTION_MAX_INSTANCES = 100
# Minimum probability value to accept a detected instance
# ROIs below this threshold are skipped
DETECTION_MIN_CONFIDENCE = 0.7
# Non-maximum suppression threshold for detection
DETECTION_NMS_THRESHOLD = 0.3
#### development and debugging ##############################################################
# stay compatible with original for comparison
# NOTE GPU convolutions do not produce consistent results on same input.
COMPAT = False
# if false then run rpn only
HEAD = True
############################################################################################
def __init__(self):
"""Set values of computed attributes."""
if self.GPU_COUNT > 0:
self.DEVICE = "cuda"
torch.backends.cudnn.benchmark = True
else:
self.DEVICE = "cpu"
torch.backends.cudnn.benchmark = False
# default weights is pretrained coco
self.WEIGHTS = os.path.abspath(join(os.path.dirname(__file__),
os.pardir, "data/models",
self.COCO_MODEL_WEIGHTS))
# Compute backbone size from input image size
self.BACKBONE_SHAPES = np.array(
[[int(math.ceil(self.IMAGE_SHAPE[0] / stride)),
int(math.ceil(self.IMAGE_SHAPE[1] / stride))]
for stride in self.BACKBONE_STRIDES])
# Generate Anchors here as used by dataset and model
self.ANCHORS = generate_pyramid_anchors(self.RPN_ANCHOR_SCALES,
self.RPN_ANCHOR_RATIOS,
self.BACKBONE_SHAPES,
self.BACKBONE_STRIDES,
self.RPN_ANCHOR_STRIDE)
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n") | python | 6,549 |
# TensorFlow external dependencies that can be loaded in WORKSPACE files.
load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
load("//third_party/gpus:rocm_configure.bzl", "rocm_configure")
load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure")
load("//third_party/nccl:nccl_configure.bzl", "nccl_configure")
load("//third_party/mkl:build_defs.bzl", "mkl_repository")
load("//third_party/git:git_configure.bzl", "git_configure")
load("//third_party/py:python_configure.bzl", "python_configure")
load("//third_party/sycl:sycl_configure.bzl", "sycl_configure")
load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
load("//third_party/toolchains/remote:configure.bzl", "remote_execution_configure")
load("//third_party/toolchains/clang6:repo.bzl", "clang6_configure")
load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
load("//third_party:repo.bzl", "tf_http_archive")
load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain")
load("@io_bazel_rules_closure//closure/private:java_import_external.bzl", "java_import_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load(
"//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl",
"def_file_filter_configure",
)
load("//third_party/FP16:workspace.bzl", FP16 = "repo")
load("//third_party/aws:workspace.bzl", aws = "repo")
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo")
load("//third_party/hwloc:workspace.bzl", hwloc = "repo")
load("//third_party/icu:workspace.bzl", icu = "repo")
load("//third_party/jpeg:workspace.bzl", jpeg = "repo")
load("//third_party/nasm:workspace.bzl", nasm = "repo")
load("//third_party/kissfft:workspace.bzl", kissfft = "repo")
load("//third_party/keras_applications_archive:workspace.bzl", keras_applications = "repo")
load("//third_party/pasta:workspace.bzl", pasta = "repo")
def initialize_third_party():
""" Load third party repositories. See above load() statements. """
FP16()
aws()
flatbuffers()
highwayhash()
hwloc()
icu()
keras_applications()
kissfft()
jpeg()
nasm()
pasta()
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
def clean_dep(dep):
return str(Label(dep))
# If TensorFlow is linked as a submodule.
# path_prefix is no longer used.
# tf_repo_name is thought to be under consideration.
def tf_workspace(path_prefix = "", tf_repo_name = ""):
tf_repositories(path_prefix, tf_repo_name)
tf_bind()
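# Illustrative call site (a sketch; the exact label may differ between TensorFlow
# versions): a WORKSPACE file loads this macro and invokes it once, e.g.
#
#     load("//tensorflow:workspace.bzl", "tf_workspace")
#     tf_workspace()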
# Define all external repositories required by TensorFlow
def tf_repositories(path_prefix = "", tf_repo_name = ""):
"""All external dependencies for TF builds."""
# Note that we check the minimum bazel version in WORKSPACE.
clang6_configure(name = "local_config_clang6")
cc_download_clang_toolchain(name = "local_config_download_clang")
cuda_configure(name = "local_config_cuda")
tensorrt_configure(name = "local_config_tensorrt")
nccl_configure(name = "local_config_nccl")
git_configure(name = "local_config_git")
sycl_configure(name = "local_config_sycl")
syslibs_configure(name = "local_config_syslibs")
python_configure(name = "local_config_python")
rocm_configure(name = "local_config_rocm")
native.local_repository(
name = "local_config_mlir",
path = "third_party/mlir",
)
remote_execution_configure(name = "local_config_remote_execution")
initialize_third_party()
# For windows bazel build
# TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
def_file_filter_configure(name = "local_config_def_file_filter")
# Point //external/local_config_arm_compiler to //external/arm_compiler
arm_compiler_configure(
name = "local_config_arm_compiler",
build_file = clean_dep("//third_party/toolchains/cpus/arm:BUILD"),
remote_config_repo = "../arm_compiler",
)
mkl_repository(
name = "mkl_linux",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "a936d6b277a33d2a027a024ea8e65df62bd2e162c7ca52c48486ed9d5dc27160",
strip_prefix = "mklml_lnx_2019.0.5.20190502",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.20-rc/mklml_lnx_2019.0.5.20190502.tgz",
"https://github.com/intel/mkl-dnn/releases/download/v0.20-rc/mklml_lnx_2019.0.5.20190502.tgz",
],
)
mkl_repository(
name = "mkl_windows",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "535857b17643d7f7546b58fc621244e7cfcc4fff2aa2ebd3fc5b4e126bfc36cf",
strip_prefix = "mklml_win_2019.0.5.20190502",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.20-rc/mklml_win_2019.0.5.20190502.zip",
"https://github.com/intel/mkl-dnn/releases/download/v0.20-rc/mklml_win_2019.0.5.20190502.zip",
],
)
mkl_repository(
name = "mkl_darwin",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "2fbb71a0365d42a39ea7906568d69b1db3bfc9914fee75eedb06c5f32bf5fa68",
strip_prefix = "mklml_mac_2019.0.5.20190502",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.20-rc/mklml_mac_2019.0.5.20190502.tgz",
"https://github.com/intel/mkl-dnn/releases/download/v0.20-rc/mklml_mac_2019.0.5.20190502.tgz",
],
)
if path_prefix:
print("path_prefix was specified to tf_workspace but is no longer used " +
"and will be removed in the future.")
# Important: If you are upgrading MKL-DNN, then update the version numbers
# in third_party/mkl_dnn/mkldnn.BUILD. In addition, the new version of
# MKL-DNN might require upgrading MKL ML libraries also. If they need to be
# upgraded then update the version numbers on all three versions above
# (Linux, Mac, Windows).
tf_http_archive(
name = "mkl_dnn",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
sha256 = "a198a9bd3c584607e6a467f780beca92c8411cd656fcc8ec6fa5abe73d4af823",
strip_prefix = "mkl-dnn-0.20.3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/archive/v0.20.3.tar.gz",
"https://github.com/intel/mkl-dnn/archive/v0.20.3.tar.gz",
],
)
tf_http_archive(
name = "mkl_dnn_v1",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
sha256 = "fcc2d951f7170eade0cfdd0d8d1d58e3e7785bd326bca6555f3722f8cba71811",
strip_prefix = "mkl-dnn-1.0-pc2",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/archive/v1.0-pc2.tar.gz",
"https://github.com/intel/mkl-dnn/archive/v1.0-pc2.tar.gz",
],
)
tf_http_archive(
name = "com_google_absl",
build_file = clean_dep("//third_party:com_google_absl.BUILD"),
sha256 = "acd93f6baaedc4414ebd08b33bebca7c7a46888916101d8c0b8083573526d070",
strip_prefix = "abseil-cpp-43ef2148c0936ebf7cb4be6b19927a9d9d145b8f",
urls = [
"http://mirror.tensorflow.org/github.com/abseil/abseil-cpp/archive/43ef2148c0936ebf7cb4be6b19927a9d9d145b8f.tar.gz",
"https://github.com/abseil/abseil-cpp/archive/43ef2148c0936ebf7cb4be6b19927a9d9d145b8f.tar.gz",
],
)
tf_http_archive(
name = "eigen_archive",
build_file = clean_dep("//third_party:eigen.BUILD"),
patch_file = clean_dep("//third_party/eigen3:gpu_packet_math.patch"),
sha256 = "f3d69ac773ecaf3602cb940040390d4e71a501bb145ca9e01ce5464cf6d4eb68",
strip_prefix = "eigen-eigen-049af2f56331",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/bitbucket.org/eigen/eigen/get/049af2f56331.tar.gz",
"https://bitbucket.org/eigen/eigen/get/049af2f56331.tar.gz",
],
)
tf_http_archive(
name = "arm_compiler",
build_file = clean_dep("//:arm_compiler.BUILD"),
sha256 = "4c622a5c7b9feb9615d4723b03a13142a7f3f813f9296861d5401282b9fbea96",
strip_prefix = "tools-0e906ebc527eab1cdbf7adabff5b474da9562e9f/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/raspberrypi/tools/archive/0e906ebc527eab1cdbf7adabff5b474da9562e9f.tar.gz",
"https://github.com/raspberrypi/tools/archive/0e906ebc527eab1cdbf7adabff5b474da9562e9f.tar.gz",
],
)
tf_http_archive(
name = "libxsmm_archive",
build_file = clean_dep("//third_party:libxsmm.BUILD"),
sha256 = "5fc1972471cd8e2b8b64ea017590193739fc88d9818e3d086621e5c08e86ea35",
strip_prefix = "libxsmm-1.11",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/hfp/libxsmm/archive/1.11.tar.gz",
"https://github.com/hfp/libxsmm/archive/1.11.tar.gz",
],
)
tf_http_archive(
name = "com_googlesource_code_re2",
sha256 = "d070e2ffc5476c496a6a872a6f246bfddce8e7797d6ba605a7c8d72866743bf9",
strip_prefix = "re2-506cfa4bffd060c06ec338ce50ea3468daa6c814",
system_build_file = clean_dep("//third_party/systemlibs:re2.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
"https://github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_google_cloud_cpp",
sha256 = "fd0c3e3b50f32af332b53857f8cd1bfa009e33d1eeecabc5c79a4825d906a90c",
strip_prefix = "google-cloud-cpp-0.10.0",
system_build_file = clean_dep("//third_party/systemlibs:google_cloud_cpp.BUILD"),
system_link_files = {
"//third_party/systemlibs:google_cloud_cpp.google.cloud.bigtable.BUILD": "google/cloud/bigtable/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/google-cloud-cpp/archive/v0.10.0.tar.gz",
"https://github.com/googleapis/google-cloud-cpp/archive/v0.10.0.tar.gz",
],
)
tf_http_archive(
name = "com_github_googleapis_googleapis",
build_file = clean_dep("//third_party:googleapis.BUILD"),
sha256 = "824870d87a176f26bcef663e92051f532fac756d1a06b404055dc078425f4378",
strip_prefix = "googleapis-f81082ea1e2f85c43649bee26e0d9871d4b41cdb",
system_build_file = clean_dep("//third_party/systemlibs:googleapis.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
"https://github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
],
)
tf_http_archive(
name = "gemmlowp",
sha256 = "6678b484d929f2d0d3229d8ac4e3b815a950c86bb9f17851471d143f6d4f7834",
strip_prefix = "gemmlowp-12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip",
"https://github.com/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip",
],
)
tf_http_archive(
name = "farmhash_archive",
build_file = clean_dep("//third_party:farmhash.BUILD"),
sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0",
strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
"https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
],
)
tf_http_archive(
name = "png_archive",
build_file = clean_dep("//third_party:png.BUILD"),
patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
sha256 = "ca74a0dace179a8422187671aee97dd3892b53e168627145271cad5b5ac81307",
strip_prefix = "libpng-1.6.37",
system_build_file = clean_dep("//third_party/systemlibs:png.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
"https://github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
],
)
tf_http_archive(
name = "org_sqlite",
build_file = clean_dep("//third_party:sqlite.BUILD"),
sha256 = "d02fc4e95cfef672b45052e221617a050b7f2e20103661cda88387349a9b1327",
strip_prefix = "sqlite-amalgamation-3280000",
system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2019/sqlite-amalgamation-3280000.zip",
"https://www.sqlite.org/2019/sqlite-amalgamation-3280000.zip",
],
)
tf_http_archive(
name = "gif_archive",
build_file = clean_dep("//third_party:gif.BUILD"),
patch_file = clean_dep("//third_party:gif_fix_strtok_r.patch"),
sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd",
strip_prefix = "giflib-5.2.1",
system_build_file = clean_dep("//third_party/systemlibs:gif.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
"http://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
],
)
tf_http_archive(
name = "six_archive",
build_file = clean_dep("//third_party:six.BUILD"),
sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a",
strip_prefix = "six-1.10.0",
system_build_file = clean_dep("//third_party/systemlibs:six.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
"https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
],
)
tf_http_archive(
name = "astor_archive",
build_file = clean_dep("//third_party:astor.BUILD"),
sha256 = "95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d",
strip_prefix = "astor-0.7.1",
system_build_file = clean_dep("//third_party/systemlibs:astor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
"https://pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
],
)
tf_http_archive(
name = "functools32_archive",
build_file = clean_dep("//third_party:functools32.BUILD"),
sha256 = "f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d",
strip_prefix = "functools32-3.2.3-2",
system_build_file = clean_dep("//third_party/systemlibs:functools32.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
"https://pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
],
)
tf_http_archive(
name = "gast_archive",
build_file = clean_dep("//third_party:gast.BUILD"),
sha256 = "fe939df4583692f0512161ec1c880e0a10e71e6a232da045ab8edd3756fbadf0",
strip_prefix = "gast-0.2.2",
system_build_file = clean_dep("//third_party/systemlibs:gast.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/4e/35/11749bf99b2d4e3cceb4d55ca22590b0d7c2c62b9de38ac4a4a7f4687421/gast-0.2.2.tar.gz",
"https://files.pythonhosted.org/packages/4e/35/11749bf99b2d4e3cceb4d55ca22590b0d7c2c62b9de38ac4a4a7f4687421/gast-0.2.2.tar.gz",
],
)
tf_http_archive(
name = "termcolor_archive",
build_file = clean_dep("//third_party:termcolor.BUILD"),
sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
strip_prefix = "termcolor-1.1.0",
system_build_file = clean_dep("//third_party/systemlibs:termcolor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
"https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
],
)
tf_http_archive(
name = "opt_einsum_archive",
build_file = clean_dep("//third_party:opt_einsum.BUILD"),
sha256 = "d3d464b4da7ef09e444c30e4003a27def37f85ff10ff2671e5f7d7813adac35b",
strip_prefix = "opt_einsum-2.3.2",
system_build_file = clean_dep("//third_party/systemlibs:opt_einsum.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
"https://pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
],
)
tf_http_archive(
name = "absl_py",
sha256 = "3d0f39e0920379ff1393de04b573bca3484d82a5f8b939e9e83b20b6106c9bbe",
strip_prefix = "abseil-py-pypi-v0.7.1",
system_build_file = clean_dep("//third_party/systemlibs:absl_py.BUILD"),
system_link_files = {
"//third_party/systemlibs:absl_py.absl.BUILD": "absl/BUILD",
"//third_party/systemlibs:absl_py.absl.flags.BUILD": "absl/flags/BUILD",
"//third_party/systemlibs:absl_py.absl.testing.BUILD": "absl/testing/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-py/archive/pypi-v0.7.1.tar.gz",
"https://github.com/abseil/abseil-py/archive/pypi-v0.7.1.tar.gz",
],
)
tf_http_archive(
name = "enum34_archive",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
"https://pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
],
sha256 = "8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1",
build_file = clean_dep("//third_party:enum34.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:enum34.BUILD"),
strip_prefix = "enum34-1.1.6/enum",
)
tf_http_archive(
name = "org_python_pypi_backports_weakref",
build_file = clean_dep("//third_party:backports_weakref.BUILD"),
sha256 = "8813bf712a66b3d8b85dc289e1104ed220f1878cf981e2fe756dfaabe9a82892",
strip_prefix = "backports.weakref-1.0rc1/src",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
"https://pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
],
)
filegroup_external(
name = "org_python_license",
licenses = ["notice"], # Python 2.0
sha256_urls = {
"e76cacdf0bdd265ff074ccca03671c33126f597f39d0ed97bc3e5673d9170cf6": [
"https://storage.googleapis.com/mirror.tensorflow.org/docs.python.org/2.7/_sources/license.rst.txt",
"https://docs.python.org/2.7/_sources/license.rst.txt",
],
},
)
# 310ba5ee72661c081129eb878c1bbcec936b20f0 is based on 3.8.0 with a fix for protobuf.bzl.
PROTOBUF_URLS = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/310ba5ee72661c081129eb878c1bbcec936b20f0.tar.gz",
"https://github.com/protocolbuffers/protobuf/archive/310ba5ee72661c081129eb878c1bbcec936b20f0.tar.gz",
]
PROTOBUF_SHA256 = "b9e92f9af8819bbbc514e2902aec860415b70209f31dfc8c4fa72515a5df9d59"
PROTOBUF_STRIP_PREFIX = "protobuf-310ba5ee72661c081129eb878c1bbcec936b20f0"
# protobuf depends on @zlib, it has to be renamed to @zlib_archive because "zlib" is already
# defined using bind for grpc.
PROTOBUF_PATCH = "//third_party/protobuf:protobuf.patch"
tf_http_archive(
name = "com_google_protobuf",
patch_file = clean_dep(PROTOBUF_PATCH),
sha256 = PROTOBUF_SHA256,
strip_prefix = PROTOBUF_STRIP_PREFIX,
system_build_file = clean_dep("//third_party/systemlibs:protobuf.BUILD"),
system_link_files = {
"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",
},
urls = PROTOBUF_URLS,
)
tf_http_archive(
name = "nsync",
sha256 = "704be7f58afa47b99476bbac7aafd1a9db4357cef519db361716f13538547ffd",
strip_prefix = "nsync-1.20.2",
system_build_file = clean_dep("//third_party/systemlibs:nsync.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/nsync/archive/1.20.2.tar.gz",
"https://github.com/google/nsync/archive/1.20.2.tar.gz",
],
)
tf_http_archive(
name = "com_google_googletest",
sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86",
strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
"https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
],
)
tf_http_archive(
name = "com_github_gflags_gflags",
sha256 = "ae27cdbcd6a2f935baa78e4f21f675649271634c092b1be01469440495609d0e",
strip_prefix = "gflags-2.2.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/gflags/gflags/archive/v2.2.1.tar.gz",
"https://github.com/gflags/gflags/archive/v2.2.1.tar.gz",
],
)
tf_http_archive(
name = "pcre",
build_file = clean_dep("//third_party:pcre.BUILD"),
sha256 = "69acbc2fbdefb955d42a4c606dfde800c2885711d2979e356c0636efde9ec3b5",
strip_prefix = "pcre-8.42",
system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
"http://ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
],
)
tf_http_archive(
name = "swig",
build_file = clean_dep("//third_party:swig.BUILD"),
sha256 = "58a475dbbd4a4d7075e5fe86d4e54c9edde39847cdb96a3053d87cb64a23a453",
strip_prefix = "swig-3.0.8",
system_build_file = clean_dep("//third_party/systemlibs:swig.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"http://ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"http://pilotfiber.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
],
)
tf_http_archive(
name = "curl",
build_file = clean_dep("//third_party:curl.BUILD"),
sha256 = "4376ac72b95572fb6c4fbffefb97c7ea0dd083e1974c0e44cd7e49396f454839",
strip_prefix = "curl-7.65.3",
system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.65.3.tar.gz",
"https://curl.haxx.se/download/curl-7.65.3.tar.gz",
],
)
# WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL to change the below rule
tf_http_archive(
name = "grpc",
sha256 = "67a6c26db56f345f7cee846e681db2c23f919eba46dd639b09462d1b6203d28c",
strip_prefix = "grpc-4566c2a29ebec0835643b972eb99f4306c4234a3",
system_build_file = clean_dep("//third_party/systemlibs:grpc.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/grpc/grpc/archive/4566c2a29ebec0835643b972eb99f4306c4234a3.tar.gz",
"https://github.com/grpc/grpc/archive/4566c2a29ebec0835643b972eb99f4306c4234a3.tar.gz",
],
)
tf_http_archive(
name = "com_github_nanopb_nanopb",
sha256 = "8bbbb1e78d4ddb0a1919276924ab10d11b631df48b657d960e0c795a25515735",
build_file = "@grpc//third_party:nanopb.BUILD",
strip_prefix = "nanopb-f8ac463766281625ad710900479130c7fcb4d63b",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nanopb/nanopb/archive/f8ac463766281625ad710900479130c7fcb4d63b.tar.gz",
"https://github.com/nanopb/nanopb/archive/f8ac463766281625ad710900479130c7fcb4d63b.tar.gz",
],
)
tf_http_archive(
name = "linenoise",
build_file = clean_dep("//third_party:linenoise.BUILD"),
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
"https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
],
)
# TODO(phawkins): currently, this rule uses an unofficial LLVM mirror.
# Switch to an official source of snapshots if/when possible.
tf_http_archive(
name = "llvm",
build_file = clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"),
sha256 = "88012afcd6d8238430d39967b62e5599bc31d9c4cdc6d20281bedf1020b7000b",
strip_prefix = "llvm-b7d166cebcf619a3691eed3f994384aab3d80fa6",
urls = [
"https://mirror.bazel.build/github.com/llvm-mirror/llvm/archive/b7d166cebcf619a3691eed3f994384aab3d80fa6.tar.gz",
"https://github.com/llvm-mirror/llvm/archive/b7d166cebcf619a3691eed3f994384aab3d80fa6.tar.gz",
],
)
tf_http_archive(
name = "lmdb",
build_file = clean_dep("//third_party:lmdb.BUILD"),
sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
system_build_file = clean_dep("//third_party/systemlibs:lmdb.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
"https://github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
],
)
tf_http_archive(
name = "jsoncpp_git",
build_file = clean_dep("//third_party:jsoncpp.BUILD"),
sha256 = "c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6",
strip_prefix = "jsoncpp-1.8.4",
system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz",
"https://github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz",
],
)
tf_http_archive(
name = "boringssl",
sha256 = "1188e29000013ed6517168600fc35a010d58c5d321846d6a6dfee74e4c788b45",
strip_prefix = "boringssl-7f634429a04abc48e2eb041c81c5235816c96514",
system_build_file = clean_dep("//third_party/systemlibs:boringssl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/boringssl/archive/7f634429a04abc48e2eb041c81c5235816c96514.tar.gz",
"https://github.com/google/boringssl/archive/7f634429a04abc48e2eb041c81c5235816c96514.tar.gz",
],
)
tf_http_archive(
name = "zlib_archive",
build_file = clean_dep("//third_party:zlib.BUILD"),
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
system_build_file = clean_dep("//third_party/systemlibs:zlib.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/zlib.net/zlib-1.2.11.tar.gz",
"https://zlib.net/zlib-1.2.11.tar.gz",
],
)
tf_http_archive(
name = "fft2d",
build_file = clean_dep("//third_party/fft2d:fft2d.BUILD"),
sha256 = "ada7e99087c4ed477bfdf11413f2ba8db8a840ba9bbf8ac94f4f3972e2a7cec9",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.kurims.kyoto-u.ac.jp/~ooura/fft2d.tgz",
"http://www.kurims.kyoto-u.ac.jp/~ooura/fft2d.tgz",
],
)
tf_http_archive(
name = "snappy",
build_file = clean_dep("//third_party:snappy.BUILD"),
sha256 = "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4",
strip_prefix = "snappy-1.1.7",
system_build_file = clean_dep("//third_party/systemlibs:snappy.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/snappy/archive/1.1.7.tar.gz",
"https://github.com/google/snappy/archive/1.1.7.tar.gz",
],
)
tf_http_archive(
name = "nccl_archive",
build_file = clean_dep("//third_party:nccl/archive.BUILD"),
patch_file = clean_dep("//third_party/nccl:archive.patch"),
sha256 = "9a7633e224982e2b60fa6b397d895d20d6b7498e3e02f46f98a5a4e187c5a44c",
strip_prefix = "nccl-0ceaec9cee96ae7658aa45686853286651f36384",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nvidia/nccl/archive/0ceaec9cee96ae7658aa45686853286651f36384.tar.gz",
"https://github.com/nvidia/nccl/archive/0ceaec9cee96ae7658aa45686853286651f36384.tar.gz",
],
)
tf_http_archive(
name = "kafka",
build_file = clean_dep("//third_party:kafka/BUILD"),
patch_file = clean_dep("//third_party/kafka:config.patch"),
sha256 = "cc6ebbcd0a826eec1b8ce1f625ffe71b53ef3290f8192b6cae38412a958f4fd3",
strip_prefix = "librdkafka-0.11.5",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/edenhill/librdkafka/archive/v0.11.5.tar.gz",
"https://github.com/edenhill/librdkafka/archive/v0.11.5.tar.gz",
],
)
java_import_external(
name = "junit",
jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"http://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"http://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar",
],
licenses = ["reciprocal"], # Common Public License Version 1.0
testonly_ = True,
deps = ["@org_hamcrest_core"],
)
java_import_external(
name = "org_hamcrest_core",
jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"http://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"http://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
)
java_import_external(
name = "com_google_testing_compile",
jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
"http://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
deps = ["@com_google_guava", "@com_google_truth"],
)
java_import_external(
name = "com_google_truth",
jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
"http://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
],
licenses = ["notice"], # Apache 2.0
testonly_ = True,
deps = ["@com_google_guava"],
)
java_import_external(
name = "org_checkerframework_qual",
jar_sha256 = "a17501717ef7c8dda4dba73ded50c0d7cde440fd721acfeacbf19786ceac1ed6",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.4.0/checker-qual-2.4.0.jar",
"http://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.4.0/checker-qual-2.4.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
java_import_external(
name = "com_squareup_javapoet",
jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
"http://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
tf_http_archive(
name = "com_google_pprof",
build_file = clean_dep("//third_party:pprof.BUILD"),
sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4",
strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
"https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
],
)
tf_http_archive(
name = "cub_archive",
build_file = clean_dep("//third_party:cub.BUILD"),
sha256 = "6bfa06ab52a650ae7ee6963143a0bbc667d6504822cbd9670369b598f18c58c3",
strip_prefix = "cub-1.8.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NVlabs/cub/archive/1.8.0.zip",
"https://github.com/NVlabs/cub/archive/1.8.0.zip",
],
)
tf_http_archive(
name = "rocprim_archive",
build_file = clean_dep("//third_party:rocprim.BUILD"),
sha256 = "3c178461ead70ce6adb60c836a35a52564968af31dfa81f4157ab72b5f14d31f",
strip_prefix = "rocPRIM-4a33d328f8352df1654271939da66914f2334424",
urls = [
"https://mirror.bazel.build/github.com/ROCmSoftwarePlatform/rocPRIM/archive/4a33d328f8352df1654271939da66914f2334424.tar.gz",
"https://github.com/ROCmSoftwarePlatform/rocPRIM/archive/4a33d328f8352df1654271939da66914f2334424.tar.gz",
],
)
tf_http_archive(
name = "cython",
build_file = clean_dep("//third_party:cython.BUILD"),
delete = ["BUILD.bazel"],
sha256 = "bccc9aa050ea02595b2440188813b936eaf345e85fb9692790cecfe095cf91aa",
strip_prefix = "cython-0.28.4",
system_build_file = clean_dep("//third_party/systemlibs:cython.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/cython/cython/archive/0.28.4.tar.gz",
"https://github.com/cython/cython/archive/0.28.4.tar.gz",
],
)
tf_http_archive(
name = "arm_neon_2_x86_sse",
build_file = clean_dep("//third_party:arm_neon_2_x86_sse.BUILD"),
sha256 = "213733991310b904b11b053ac224fee2d4e0179e46b52fe7f8735b8831e04dcc",
strip_prefix = "ARM_NEON_2_x86_SSE-1200fe90bb174a6224a525ee60148671a786a71f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
"https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
],
)
tf_http_archive(
name = "double_conversion",
build_file = clean_dep("//third_party:double_conversion.BUILD"),
sha256 = "2f7fbffac0d98d201ad0586f686034371a6d152ca67508ab611adc2386ad30de",
strip_prefix = "double-conversion-3992066a95b823efc8ccc1baf82a1cfc73f6e9b8",
system_build_file = clean_dep("//third_party/systemlibs:double_conversion.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
"https://github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_float",
build_file = clean_dep("//third_party:tflite_mobilenet_float.BUILD"),
sha256 = "2fadeabb9968ec6833bee903900dda6e61b3947200535874ce2fe42a8493abc0",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_quant",
build_file = clean_dep("//third_party:tflite_mobilenet_quant.BUILD"),
sha256 = "d32432d28673a936b2d6281ab0600c71cf7226dfe4cdcef3012555f691744166",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant_protobuf",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "09280972c5777f1aa775ef67cb4ac5d5ed21970acd8535aeca62450ef14f0d79",
strip_prefix = "ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
"https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
],
)
tf_http_archive(
name = "tflite_conv_actions_frozen",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
],
)
tf_http_archive(
name = "tflite_smartreply",
build_file = clean_dep("//third_party:tflite_smartreply.BUILD"),
sha256 = "8980151b85a87a9c1a3bb1ed4748119e4a85abd3cb5744d83da4d4bd0fbeef7c",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/smartreply_1.0_2017_11_01.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/smartreply_1.0_2017_11_01.zip",
],
)
tf_http_archive(
name = "tflite_ovic_testdata",
build_file = clean_dep("//third_party:tflite_ovic_testdata.BUILD"),
sha256 = "033c941b7829b05ca55a124a26a6a0581b1ececc154a2153cafcfdb54f80dca2",
strip_prefix = "ovic",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
"https://storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
],
)
tf_http_archive(
name = "build_bazel_rules_android",
sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
strip_prefix = "rules_android-0.1.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
"https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
],
)
tf_http_archive(
name = "tbb",
build_file = clean_dep("//third_party/ngraph:tbb.BUILD"),
sha256 = "c3245012296f09f1418b78a8c2f17df5188b3bd0db620f7fd5fabe363320805a",
strip_prefix = "tbb-2019_U1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/01org/tbb/archive/2019_U1.zip",
"https://github.com/01org/tbb/archive/2019_U1.zip",
],
)
tf_http_archive(
name = "ngraph",
build_file = clean_dep("//third_party/ngraph:ngraph.BUILD"),
sha256 = "a1780f24a1381fc25e323b4b2d08b6ef5129f42e011305b2a34dcf43a48030d5",
strip_prefix = "ngraph-0.11.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz",
"https://github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz",
],
)
tf_http_archive(
name = "nlohmann_json_lib",
build_file = clean_dep("//third_party/ngraph:nlohmann_json.BUILD"),
sha256 = "c377963a95989270c943d522bfefe7b889ef5ed0e1e15d535fd6f6f16ed70732",
strip_prefix = "json-3.4.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nlohmann/json/archive/v3.4.0.tar.gz",
"https://github.com/nlohmann/json/archive/v3.4.0.tar.gz",
],
)
tf_http_archive(
name = "ngraph_tf",
build_file = clean_dep("//third_party/ngraph:ngraph_tf.BUILD"),
sha256 = "742a642d2c6622277df4c902b6830d616d0539cc8cd843d6cdb899bb99e66e36",
strip_prefix = "ngraph-tf-0.9.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip",
"https://github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip",
],
)
tf_http_archive(
name = "pybind11",
urls = [
"https://mirror.bazel.build/github.com/pybind/pybind11/archive/v2.3.0.tar.gz",
"https://github.com/pybind/pybind11/archive/v2.3.0.tar.gz",
],
sha256 = "0f34838f2c8024a6765168227ba587b3687729ebf03dc912f88ff75c7aa9cfe8",
strip_prefix = "pybind11-2.3.0",
build_file = clean_dep("//third_party:pybind11.BUILD"),
)
tf_http_archive(
name = "wrapt",
build_file = clean_dep("//third_party:wrapt.BUILD"),
sha256 = "8a6fb40e8f8b6a66b4ba81a4044c68e6a7b1782f21cfabc06fb765332b4c3e51",
strip_prefix = "wrapt-1.11.1/src/wrapt",
system_build_file = clean_dep("//third_party/systemlibs:wrapt.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
"https://github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
],
)
def tf_bind():
"""Bind targets for some external repositories"""
##############################################################################
# BIND DEFINITIONS
#
# Please do not add bind() definitions unless we have no other choice.
# If that ends up being the case, please leave a comment explaining
# why we can't depend on the canonical build target.
    # gRPC wants a cares dependency but its contents are not actually
# important since we have set GRPC_ARES=0 in .bazelrc
native.bind(
name = "cares",
actual = "@com_github_nanopb_nanopb//:nanopb",
)
# Needed by Protobuf
native.bind(
name = "grpc_cpp_plugin",
actual = "@grpc//:grpc_cpp_plugin",
)
native.bind(
name = "grpc_python_plugin",
actual = "@grpc//:grpc_python_plugin",
)
native.bind(
name = "grpc_lib",
actual = "@grpc//:grpc++",
)
native.bind(
name = "grpc_lib_unsecure",
actual = "@grpc//:grpc++_unsecure",
)
# Needed by gRPC
native.bind(
name = "libssl",
actual = "@boringssl//:ssl",
)
# Needed by gRPC
native.bind(
name = "nanopb",
actual = "@com_github_nanopb_nanopb//:nanopb",
)
# Needed by gRPC
native.bind(
name = "protobuf",
actual = "@com_google_protobuf//:protobuf",
)
# gRPC expects //external:protobuf_clib and //external:protobuf_compiler
# to point to Protobuf's compiler library.
native.bind(
name = "protobuf_clib",
actual = "@com_google_protobuf//:protoc_lib",
)
# Needed by gRPC
native.bind(
name = "protobuf_headers",
actual = "@com_google_protobuf//:protobuf_headers",
)
# Needed by Protobuf
native.bind(
name = "python_headers",
actual = clean_dep("//third_party/python_runtime:headers"),
)
# Needed by Protobuf
native.bind(
name = "six",
actual = "@six_archive//:six",
)
# Needed by gRPC
native.bind(
name = "zlib",
actual = "@zlib_archive//:zlib",
)
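# Illustrative note (not from the original workspace file): consuming BUILD
# rules reach the bind() aliases above through the //external package. The
# cc_library below is a hypothetical example target; only the
# //external:grpc_lib label and its @grpc//:grpc++ actual come from the
# definitions above.
#
#     cc_library(
#         name = "uses_grpc",
#         srcs = ["uses_grpc.cc"],
#         deps = ["//external:grpc_lib"],
#     )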
import os.path as osp
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module
class VideoDataset(BaseDataset):
"""Video dataset for action recognition.
    The dataset loads raw videos and applies the specified transforms to return a
dict containing the frame tensors and other information.
The ann_file is a text file with multiple lines, and each line indicates
a sample video with the filepath and label, which are split with a
    whitespace. Example of an annotation file:
```
some/path/000.mp4 1
some/path/001.mp4 1
some/path/002.mp4 2
some/path/003.mp4 2
some/path/004.mp4 3
some/path/005.mp4 3
```
"""
def load_annotations(self):
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
filename, label = line.split(' ')
if self.data_root is not None:
filename = osp.join(self.data_root, filename)
video_infos.append(dict(filename=filename, label=int(label)))
return video_infos
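# --- Illustrative sketch, not part of the original module ---
# The annotation format documented above is "<filepath> <label>", one sample
# per line. A self-contained version of that parsing step might look like the
# helper below; `ann_lines` and `data_root` are hypothetical stand-ins for the
# attributes that load_annotations() reads from the dataset instance.
def _parse_video_annotations_sketch(ann_lines, data_root=None):
    video_infos = []
    for line in ann_lines:
        # split from the right so the trailing label is separated cleanly
        filename, label = line.strip().rsplit(' ', 1)
        if data_root is not None:
            filename = osp.join(data_root, filename)
        video_infos.append(dict(filename=filename, label=int(label)))
    return video_infos
# Example: _parse_video_annotations_sketch(["some/path/000.mp4 1"])
# -> [{'filename': 'some/path/000.mp4', 'label': 1}]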
# SPDX-License-Identifier: Apache-2.0
#
# http://nexb.com and https://github.com/nexB/scancode.io
# The ScanCode.io software is licensed under the Apache License version 2.0.
# Data generated with ScanCode.io is provided as-is without warranties.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Data Generated with ScanCode.io is provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode.io should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
#
# ScanCode.io is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode.io for support and download.
from django import forms
from django.apps import apps
from django.core.exceptions import ValidationError
import django_filters
from scanpipe.models import CodebaseResource
from scanpipe.models import DiscoveredPackage
from scanpipe.models import Project
from scanpipe.pipes.fetch import fetch_urls
scanpipe_app_config = apps.get_app_config("scanpipe")
class InputsBaseForm(forms.Form):
input_files = forms.FileField(
required=False,
widget=forms.ClearableFileInput(
attrs={"class": "file-input", "multiple": True},
),
)
input_urls = forms.CharField(
label="Download URLs",
required=False,
help_text="Provide one or more URLs to download, one per line.",
widget=forms.Textarea(
attrs={
"class": "textarea",
"rows": 2,
"placeholder": "https://domain.com/archive.zip",
},
),
)
class Media:
js = ("add-inputs.js",)
def clean_input_urls(self):
"""
Fetch the `input_urls` and set the `downloads` objects in the cleaned_data.
A validation error is raised if at least one URL could not be fetched.
"""
input_urls = self.cleaned_data.get("input_urls", [])
self.cleaned_data["downloads"], errors = fetch_urls(input_urls)
if errors:
raise ValidationError("Could not fetch: " + "\n".join(errors))
return input_urls
def handle_inputs(self, project):
input_files = self.files.getlist("input_files")
downloads = self.cleaned_data.get("downloads")
if input_files:
project.add_uploads(input_files)
if downloads:
project.add_downloads(downloads)
class PipelineBaseForm(forms.Form):
pipeline = forms.ChoiceField(
choices=scanpipe_app_config.get_pipeline_choices(),
required=False,
)
execute_now = forms.BooleanField(
label="Execute pipeline now",
initial=True,
required=False,
)
def handle_pipeline(self, project):
pipeline = self.cleaned_data["pipeline"]
execute_now = self.cleaned_data["execute_now"]
if pipeline:
project.add_pipeline(pipeline, execute_now)
class ProjectForm(InputsBaseForm, PipelineBaseForm, forms.ModelForm):
class Meta:
model = Project
fields = [
"name",
"input_files",
"input_urls",
"pipeline",
"execute_now",
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
name_field = self.fields["name"]
name_field.widget.attrs["class"] = "input"
name_field.widget.attrs["autofocus"] = True
name_field.help_text = "The unique name of your project."
def save(self, *args, **kwargs):
project = super().save(*args, **kwargs)
self.handle_inputs(project)
self.handle_pipeline(project)
return project
class AddInputsForm(InputsBaseForm, forms.Form):
def save(self, project):
self.handle_inputs(project)
return project
class AddPipelineForm(PipelineBaseForm):
def __init__(self, *args, **kwargs):
"""
The `pipeline` field is required in the context of this form.
"""
super().__init__(*args, **kwargs)
self.fields["pipeline"].required = True
def save(self, project):
self.handle_pipeline(project)
return project
class ProjectFilterSet(django_filters.FilterSet):
search = django_filters.CharFilter(field_name="name", lookup_expr="icontains")
class Meta:
model = Project
fields = ["search"]
class ResourceFilterSet(django_filters.FilterSet):
class Meta:
model = CodebaseResource
fields = [
"programming_language",
"mime_type",
]
class PackageFilterSet(django_filters.FilterSet):
class Meta:
model = DiscoveredPackage
fields = ["type", "license_expression"]
# -*- coding: utf-8 -*-
"""
Widget for plotting phase frequency response phi(f)
Author: Christian Muenker 2015
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from ..compat import QCheckBox, QWidget, QComboBox, QHBoxLayout, QFrame
import numpy as np
import pyfda.filterbroker as fb
from pyfda.pyfda_rc import params
from pyfda.plot_widgets.plot_utils import MplWidget
from pyfda.pyfda_lib import calc_Hcomplex
# TODO: ax.clear() should not be necessary for each replot?
# TODO: Canvas should be grey when disabled
class PlotPhi(QWidget):
def __init__(self, parent):
super(PlotPhi, self).__init__(parent)
self.cmbUnitsPhi = QComboBox(self)
units = ["rad", "rad/pi", "deg"]
scales = [1., 1./ np.pi, 180./np.pi]
for unit, scale in zip(units, scales):
self.cmbUnitsPhi.addItem(unit, scale)
self.cmbUnitsPhi.setObjectName("cmbUnitsA")
self.cmbUnitsPhi.setToolTip("Set unit for phase.")
self.cmbUnitsPhi.setCurrentIndex(0)
self.cmbUnitsPhi.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.chkWrap = QCheckBox("Wrapped Phase", self)
self.chkWrap.setChecked(False)
self.chkWrap.setToolTip("Plot phase wrapped to +/- pi")
layHControls = QHBoxLayout()
# layHControls.addStretch(10)
layHControls.addWidget(self.cmbUnitsPhi)
layHControls.addWidget(self.chkWrap)
layHControls.addStretch(10)
# This widget encompasses all control subwidgets:
self.frmControls = QFrame(self)
self.frmControls.setObjectName("frmControls")
self.frmControls.setLayout(layHControls)
#----------------------------------------------------------------------
# mplwidget
#----------------------------------------------------------------------
self.mplwidget = MplWidget(self)
self.mplwidget.layVMainMpl.addWidget(self.frmControls)
self.mplwidget.layVMainMpl.setContentsMargins(*params['wdg_margins'])
self.setLayout(self.mplwidget.layVMainMpl)
self.init_axes()
self.draw() # initial drawing
# #=============================================
# # Signals & Slots
# #=============================================
self.chkWrap.clicked.connect(self.draw)
self.cmbUnitsPhi.currentIndexChanged.connect(self.draw)
self.mplwidget.mplToolbar.sigEnabled.connect(self.enable_ui)
#------------------------------------------------------------------------------
def init_axes(self):
"""
Initialize and clear the axes
"""
# self.ax = self.mplwidget.ax
self.ax = self.mplwidget.fig.add_subplot(111)
self.ax.clear()
        self.ax.get_xaxis().tick_bottom() # only keep axis ticks at the bottom
        self.ax.get_yaxis().tick_left() # only keep axis ticks on the left
#------------------------------------------------------------------------------
def calc_hf(self):
"""
(Re-)Calculate the complex frequency response H(f)
"""
# calculate H_cplx(W) (complex) for W = 0 ... 2 pi:
self.W, self.H_cmplx = calc_Hcomplex(fb.fil[0], params['N_FFT'], wholeF=True)
# replace nan and inf by finite values, otherwise np.unwrap yields
# an array full of nans
self.H_cmplx = np.nan_to_num(self.H_cmplx)
#------------------------------------------------------------------------------
def enable_ui(self):
"""
Triggered when the toolbar is enabled or disabled
"""
self.frmControls.setEnabled(self.mplwidget.mplToolbar.enabled)
if self.mplwidget.mplToolbar.enabled:
self.init_axes()
self.draw()
#------------------------------------------------------------------------------
def draw(self):
"""
Main entry point:
Re-calculate |H(f)| and draw the figure if enabled
"""
if self.mplwidget.mplToolbar.enabled:
self.calc_hf()
self.update_view()
#------------------------------------------------------------------------------
def update_view(self):
"""
Draw the figure with new limits, scale etc without recalculating H(f)
"""
self.unitPhi = self.cmbUnitsPhi.currentText()
f_S2 = fb.fil[0]['f_S'] / 2.
#========= select frequency range to be displayed =====================
#=== shift, scale and select: W -> F, H_cplx -> H_c
F = self.W * f_S2 / np.pi
if fb.fil[0]['freqSpecsRangeType'] == 'sym':
# shift H and F by f_S/2
H = np.fft.fftshift(self.H_cmplx)
F -= f_S2
elif fb.fil[0]['freqSpecsRangeType'] == 'half':
# only use the first half of H and F
H = self.H_cmplx[0:params['N_FFT']//2]
F = F[0:params['N_FFT']//2]
else: # fb.fil[0]['freqSpecsRangeType'] == 'whole'
# use H and F as calculated
H = self.H_cmplx
y_str = r'$\angle H(\mathrm{e}^{\mathrm{j} \Omega})$ in '
if self.unitPhi == 'rad':
y_str += 'rad ' + r'$\rightarrow $'
scale = 1.
elif self.unitPhi == 'rad/pi':
y_str += 'rad' + r'$ / \pi \;\rightarrow $'
scale = 1./ np.pi
else:
y_str += 'deg ' + r'$\rightarrow $'
scale = 180./np.pi
fb.fil[0]['plt_phiLabel'] = y_str
fb.fil[0]['plt_phiUnit'] = self.unitPhi
if self.chkWrap.isChecked():
phi_plt = np.angle(H) * scale
else:
phi_plt = np.unwrap(np.angle(H)) * scale
#---------------------------------------------------------
self.ax.clear() # need to clear, doesn't overwrite
line_phi, = self.ax.plot(F, phi_plt)
#---------------------------------------------------------
self.ax.set_title(r'Phase Frequency Response')
self.ax.set_xlabel(fb.fil[0]['plt_fLabel'])
self.ax.set_ylabel(y_str)
self.ax.set_xlim(fb.fil[0]['freqSpecsRange'])
self.redraw()
#------------------------------------------------------------------------------
def redraw(self):
"""
Redraw the canvas when e.g. the canvas size has changed
"""
self.mplwidget.redraw()
#------------------------------------------------------------------------------
def main():
import sys
from ..compat import QApplication
app = QApplication(sys.argv)
mainw = PlotPhi(None)
app.setActiveWindow(mainw)
mainw.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
"""Inserts the current time (in seconds) into the wiki page."""
revision = "$Rev: 10617 $"
url = "$URL: http://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/Timestamp.py $"
#
# The following shows the code for the macro, old-style.
#
# The `execute` function serves no purpose other than to illustrate
# the example, it will not be used anymore.
#
# ---- (ignore in your own macro) ----
# --
import time # Trac before version 0.11 was using `time` module
def execute(hdf, txt, env):
t = time.localtime()
return "<b>%s</b>" % time.strftime('%c', t)
# --
# ---- (ignore in your own macro) ----
#
# The following is the converted new-style macro
#
# ---- (reuse for your own macro) ----
# --
from datetime import datetime
# Note: since Trac 0.11, datetime objects are used internally
from genshi.builder import tag
from trac.util.datefmt import format_datetime, utc
from trac.wiki.macros import WikiMacroBase
class TimestampMacro(WikiMacroBase):
_description = "Inserts the current time (in seconds) into the wiki page."
def expand_macro(self, formatter, name, args):
t = datetime.now(utc)
return tag.b(format_datetime(t, '%c'))
# --
# ---- (reuse for your own macro) ----
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for workflow object exports."""
from freezegun import freeze_time
import ddt
from flask.json import dumps
from ggrc_workflows.models import TaskGroupTask, Workflow
from integration.ggrc import TestCase
from integration.ggrc.models import factories
from integration.ggrc_workflows.generator import WorkflowsGenerator
from integration.ggrc_workflows.models import factories as wf_factories
@ddt.ddt
class TestExportEmptyTemplate(TestCase):
"""Test empty export for all workflow object types."""
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "GGRC",
"X-export-view": "blocks",
}
def test_single_object_export(self):
"""Test empty exports for workflow only."""
data = {
"export_to": "csv",
"objects": [{"object_name": "Workflow", "fields": "all"}]
}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
def test_unit_tip(self):
"""Test Workflow's Unit column has hint correctly"""
data = {
"export_to": "csv",
"objects": [{"object_name": "Workflow", "fields": "all"}]
}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Allowed values are:\n{}".format(
"\n".join(Workflow.VALID_UNITS)), response.data)
def test_multiple_objects(self):
"""Test empty exports for all workflow object in one query."""
data = [
{"object_name": "Workflow", "fields": "all"},
{"object_name": "TaskGroup", "fields": "all"},
{"object_name": "TaskGroupTask", "fields": "all"},
{"object_name": "Cycle", "fields": "all"},
{"object_name": "CycleTaskGroup", "fields": "all"},
{"object_name": "CycleTaskGroupObjectTask", "fields": "all"},
]
request_body = {
"export_to": "csv",
"objects": data
}
response = self.client.post("/_service/export_csv",
data=dumps(request_body), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Workflow,", response.data)
self.assertIn("Task Group,", response.data)
self.assertIn("Task,", response.data)
self.assertIn("Cycle,", response.data)
self.assertIn("Cycle Task Group,", response.data)
self.assertIn("Cycle Task,", response.data)
def test_tips_tg_task(self):
"""Test if TaskGroupTask date attributes has hints correctly."""
data = {
"export_to": "csv",
"objects": [{"object_name": "TaskGroupTask", "fields": "all"}]
}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(2, response.data.count(
"{}\nOnly working days are accepted".format(TaskGroupTask.DATE_HINT)))
class TestExportMultipleObjects(TestCase):
"""Test export of multiple objects."""
def setUp(self):
self.clear_data()
self.client.get("/login")
self.wf_generator = WorkflowsGenerator()
def test_workflow_task_group_mapping(self): # pylint: disable=invalid-name
"""Test workflow and task group mappings."""
with freeze_time("2017-03-07"):
workflow = wf_factories.WorkflowFactory()
workflow_slug = workflow.slug
task_group1 = wf_factories.TaskGroupFactory(workflow=workflow)
task_group1_slug = task_group1.slug
task_group2 = wf_factories.TaskGroupFactory(workflow=workflow)
task_group2_slug = task_group2.slug
data = [
{
"object_name": "Workflow",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": [task_group1_slug],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": [0],
},
},
"fields": "all",
},
]
response = self.export_csv(data)
self.assert200(response)
response_data = response.data
self.assertEqual(3, response_data.count(workflow_slug))
self.assertIn(task_group1_slug, response_data)
self.assertIn(task_group2_slug, response_data)
def test_tg_task(self):
"""Test task group task mappings."""
with freeze_time("2017-03-07"):
workflow = wf_factories.WorkflowFactory()
task_group1 = wf_factories.TaskGroupFactory(workflow=workflow)
task_group1_slug = task_group1.slug
task_group_task1 = wf_factories.TaskGroupTaskFactory(
task_group=task_group1)
task_group_task1_slug = task_group_task1.slug
task_group_task2 = wf_factories.TaskGroupTaskFactory(
task_group=task_group1)
task_group_task2_slug = task_group_task2.slug
data = [
{
"object_name": "TaskGroupTask",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": [task_group1_slug],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data)
self.assert200(response)
response_data = response.data
self.assertEqual(3, response_data.count(task_group1_slug))
self.assertIn(task_group_task1_slug, response_data)
self.assertIn(task_group_task2_slug, response_data)
def test_workflow_cycle_mapping(self):
"""Test workflow and cycle mappings."""
with freeze_time("2017-03-07"):
workflow = wf_factories.WorkflowFactory()
workflow_slug = workflow.slug
task_group = wf_factories.TaskGroupFactory(workflow=workflow)
wf_factories.TaskGroupTaskFactory(task_group=task_group)
wf_factories.TaskGroupTaskFactory(task_group=task_group)
self.wf_generator.generate_cycle(workflow)
self.wf_generator.activate_workflow(workflow)
def block(obj, obj_id):
return {
"object_name": obj,
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": [obj_id],
},
},
"fields": "all",
}
data = [
{
"object_name": "Cycle",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Workflow",
"slugs": [workflow_slug],
},
},
"fields": "all",
},
block("Workflow", "0"),
block("CycleTaskGroup", "0"),
block("Cycle", "2"),
block("CycleTaskGroupObjectTask", "2"),
block("CycleTaskGroup", "4"),
]
response = self.export_csv(data)
self.assert200(response)
response_data = response.data
self.assertEqual(3, response_data.count(workflow_slug))
self.assertEqual(4, response_data.count("CYCLEGROUP-"))
self.assertEqual(6, response_data.count("CYCLE-"))
self.assertEqual(2, response_data.count("CYCLETASK-"))
def test_cycle_task_objects(self):
"""Test cycle task and various objects."""
with freeze_time("2017-03-07"):
workflow = wf_factories.WorkflowFactory()
task_group = wf_factories.TaskGroupFactory(workflow=workflow)
wf_factories.TaskGroupTaskFactory(task_group=task_group)
wf_factories.TaskGroupTaskFactory(task_group=task_group)
policy = factories.PolicyFactory()
policy_slug = policy.slug
factories.RelationshipFactory(source=task_group, destination=policy)
self.wf_generator.generate_cycle(workflow)
self.wf_generator.activate_workflow(workflow)
data = [
{
"object_name": "CycleTaskGroupObjectTask",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Policy",
"slugs": [policy_slug],
},
},
"fields": "all",
}, {
"object_name": "Policy",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": ["slug", "title"],
},
]
response = self.export_csv(data)
self.assert200(response)
response_data = response.data
self.assertEqual(2, response_data.count("CYCLETASK-"))
self.assertEqual(3, response_data.count(policy_slug))
def test_wf_indirect_relevant_filters(self): # pylint: disable=invalid-name
"""Test related filter for indirect relationships on wf objects."""
with freeze_time("2017-03-07"):
workflow = wf_factories.WorkflowFactory(title="workflow-1")
task_group1 = wf_factories.TaskGroupFactory(workflow=workflow)
wf_factories.TaskGroupTaskFactory(task_group=task_group1)
wf_factories.TaskGroupTaskFactory(task_group=task_group1)
task_group2 = wf_factories.TaskGroupFactory(workflow=workflow)
wf_factories.TaskGroupTaskFactory(task_group=task_group2)
policy = factories.PolicyFactory()
policy_slug = policy.slug
factories.RelationshipFactory(source=task_group1, destination=policy)
self.wf_generator.generate_cycle(workflow)
self.wf_generator.activate_workflow(workflow)
def block(obj):
return {
"object_name": obj,
"fields": ["slug"],
"filters": {
"expression": {
"object_name": "Policy",
"op": {"name": "relevant"},
"slugs": [policy_slug],
},
},
}
data = [
block("Workflow"),
block("Cycle"),
block("CycleTaskGroup"),
block("CycleTaskGroupObjectTask"),
]
response = self.export_csv(data)
self.assert200(response)
response_data = response.data
wf1 = Workflow.query.filter_by(title="workflow-1").first()
cycle = wf1.cycles[0]
cycle_tasks = []
for cycle_task in cycle.cycle_task_group_object_tasks:
for related_object in cycle_task.related_objects():
if related_object.slug == policy_slug:
cycle_tasks.append(cycle_task)
break
cycle_task_groups = list({cycle_task.cycle_task_group
for cycle_task in cycle_tasks})
self.assertEqual(1, response_data.count("WORKFLOW-"))
self.assertRegexpMatches(response_data, ",{}[,\r\n]".format(wf1.slug))
self.assertEqual(1, response_data.count("CYCLE-"))
self.assertRegexpMatches(response_data, ",{}[,\r\n]".format(cycle.slug))
self.assertEqual(1, response_data.count("CYCLEGROUP-"))
self.assertEqual(1, len(cycle_task_groups))
self.assertRegexpMatches(response_data, ",{}[,\r\n]".format(
cycle_task_groups[0].slug))
self.assertEqual(2, response_data.count("CYCLETASK-"))
self.assertEqual(2, len(cycle_tasks))
for cycle_task in cycle_tasks:
self.assertRegexpMatches(response_data, ",{}[,\r\n]".format(
cycle_task.slug))
destinations = [
("Workflow", wf1.slug, 1),
("Cycle", cycle.slug, 1),
("CycleTaskGroupObjectTask", cycle_tasks[0].slug, 1),
("CycleTaskGroupObjectTask", cycle_tasks[1].slug, 1),
]
for object_name, slug, count in destinations:
data = [{
"object_name": "Policy",
"fields": ["slug"],
"filters": {
"expression": {
"object_name": object_name,
"op": {"name": "relevant"},
"slugs": [slug],
},
},
}]
response = self.export_csv(data)
self.assert200(response)
response_data = response.data
self.assertEqual(count, response_data.count(",POLICY-"),
"Count for " + object_name)
self.assertIn("," + policy_slug, response_data)
#!/usr/bin/env python
# coding:utf-8
# Created on Dec. 5, 2015 Sat to enable i18n support in XX-Net.
# Based on http://stackoverflow.com/questions/18683905/how-to-use-jinja2-and-its-i18n-extenstion-using-babel-outside-flask
#
# I. See jinja2: https://github.com/mitsuhiko/jinja2
# II. See MarkupSafe-0.23.tar.gz: https://pypi.python.org/packages/source/M/MarkupSafe/MarkupSafe-0.23.tar.gz
# III. See Python babel: https://github.com/python-babel/babel
# IV. See pytz-2015.7.tar.gz: https://pypi.python.org/packages/source/p/pytz/pytz-2015.7.tar.gz#md5=252bb731883f37ff9c7f462954e8706d
# V. See Language_country code list: http://www.fincher.org/Utilities/CountryLanguageList.shtml
# IMPORTANT:
# By the way, module decimal.py and numbers.py are also needed on Windows when run with the bundled Python,
# which were already appended to folder python27/1.0/lib.
# See for these steps at http://tlphoto.googlecode.com/git/jinja2_i18n_howto.txt
# 0. Create the folder structure (no whitespace after the commas!!!)
# mkdir -pv ./lang/{en_US,zh_CN,fa_IR,es_VE,de_DE,ja_JP}/LC_MESSAGES/
# 1. Extract
# pybabel -v extract -F babel.config -o ./lang/messages.pot ./
# 2. Init/Update
# 2.1 Init
# pybabel init -l zh_CN -d ./lang -i ./lang/messages.pot
# 2.2 Update
# pybabel update -l zh_CN -d ./lang -i ./lang/messages.pot
# 3. Compile
# pybabel compile -f -d ./lang
import os
import sys
import locale
# Determines jinja2 and babel library path, and appends them to sys.path
current_path = os.path.dirname(os.path.abspath(__file__))
# When run standalone
#if __name__ == '__main__':
python_path = os.path.abspath(os.path.join(current_path, os.pardir, 'python27', '1.0'))
python_lib = os.path.abspath(os.path.join(python_path, 'lib'))
noarch_lib = os.path.abspath(os.path.join(python_lib, 'noarch'))
# Michael.X: the common lib should be put in python27/1.0/lib/noarch so that all platforms can use it.
# The path structure is not ideal for historical reasons: python27/1.0/ was originally a win32 env.
# Appended modules decimal.py and numbers.py were copied from Python code on Windows,
# so they're put in folder python27/1.0/lib
if python_lib not in sys.path:
sys.path.append(python_lib)
# As packages jinja2, markupsafe, babel, pytz are OS-independent,
# they're put in folder python27/1.0/lib/noarch
if noarch_lib not in sys.path:
sys.path.append(noarch_lib)
#print("The current path: %s" % current_path)
#print("The python path: %s" % python_path)
#print(sys.path)
import yaml
from jinja2 import Environment, FileSystemLoader
from babel.support import Translations
class Jinja2I18nHelper():
"""Demonstrates how to use jinja2 i18n engine to internationalize. A class-encapsulated version.
Language files reside under folder lang of the current file location.
"""
def __init__(self):
"""Sets up the i18n environment"""
# The current language, i.e., the default system language
self.current_locale, self.encoding = locale.getdefaultlocale() # tuple, e.g., ('en_US', 'UTF-8')
self.extensions = ['jinja2.ext.i18n', 'jinja2.ext.autoescape', 'jinja2.ext.with_']
        # Specifies the language path (the l10n path), ./lang, which holds all the translations
self.locale_dir = os.path.join(current_path, "lang")
self.template_dir = "web_ui" # template file root folder
self.loader = FileSystemLoader(self.template_dir)
self.env = Environment(extensions=self.extensions, loader=self.loader) # add any other env options if needed
#print("The current language is %s" % self.current_locale)
#print("The locale dir: %s" % self.locale_dir)
def refresh_env(self, locale_dir, template_dir):
"""Refreshes the locale environment by changing the locale directory and the temple file directory."""
self.locale_dir = locale_dir
self.template_dir = template_dir
self.loader = FileSystemLoader(self.template_dir)
self.env = Environment(extensions=self.extensions, loader=self.loader)
#print("The current path: %s" % current_path)
#print("The locale dir: %s" % self.locale_dir)
#print("The current language is %s" % self.current_locale)
def render(self, template_name, desired_lang):
"""Returns the rendered template with the desired language."""
if not desired_lang:
desired_lang = self.current_locale
# To test simplified Chinese only
#desired_lang = "zh_CN" # Simple Chinese
desired_locales_list = [desired_lang]
#print("Your desired language is %s" % desired_lang)
translations = Translations.load(self.locale_dir, desired_locales_list)
self.env.install_gettext_translations(translations)
template = self.env.get_template(template_name)
return template.render().encode('utf-8') # magic here & avoid error UnicodeEncodeError
# Creates the global singleton object (?)
ihelper = Jinja2I18nHelper()
if __name__ == '__main__':
# Test cases. If not found, en_US is used instead.
    # Language_country code list: http://www.fincher.org/Utilities/CountryLanguageList.shtml
#desired_lang = "en_US" # American English
desired_lang = "zh_CN" # Simple Chinese
#desired_lang = "es_VE" #Venezuela
#desired_lang = "de_DE" # Geman
#desired_lang = "fa_IR" # Iran-Persian
#desired_lang = "ja_JP" # Japanese
root_path = os.path.abspath(os.path.join(current_path, os.pardir))
print("--- launcher/web_ui/about.html ---")
launcher_path = os.path.abspath(os.path.join(root_path, 'launcher'))
print("The launcher_path: %s" % launcher_path)
locale_dir = os.path.abspath(os.path.join(launcher_path, 'lang'))
template_dir = os.path.abspath(os.path.join(launcher_path, 'web_ui'))
ihelper.refresh_env(locale_dir, template_dir)
#print( ihelper.render("about.html", desired_lang) )
print("\n--- launcher/web_ui/menu.yaml ---")
stream = ihelper.render("menu.yaml", desired_lang)
#stream = ihelper.render("menu.yaml", None)
print(yaml.load(stream))
# Test locale in module gae_proxy
print("\n--- gae_proxy/web_ui/menu.yaml ---")
gae_proxy_path = os.path.abspath(os.path.join(root_path, 'gae_proxy'))
print("The gae_proxy_path: %s" % gae_proxy_path)
locale_dir = os.path.abspath(os.path.join(gae_proxy_path, 'lang'))
template_dir = os.path.abspath(os.path.join(gae_proxy_path, 'web_ui'))
ihelper.refresh_env(locale_dir, template_dir)
stream = ihelper.render("menu.yaml", desired_lang)
print(yaml.load(stream))
"""Helper to check if path is safe to remove."""
from pathlib import Path
from custom_components.racelandshop.share import get_racelandshop
def is_safe_to_remove(path: str) -> bool:
"""Helper to check if path is safe to remove."""
racelandshop = get_racelandshop()
paths = [
Path(f"{racelandshop.core.config_path}/{racelandshop.configuration.appdaemon_path}"),
Path(f"{racelandshop.core.config_path}/{racelandshop.configuration.netdaemon_path}"),
Path(f"{racelandshop.core.config_path}/{racelandshop.configuration.plugin_path}"),
Path(f"{racelandshop.core.config_path}/{racelandshop.configuration.python_script_path}"),
Path(f"{racelandshop.core.config_path}/{racelandshop.configuration.theme_path}"),
Path(f"{racelandshop.core.config_path}/custom_components/"),
]
if Path(path) in paths:
return False
return True
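# Illustrative behaviour (the "/config" prefix is a hypothetical example of
# racelandshop.core.config_path): the protected directories themselves are
# rejected, while paths inside them are considered safe to remove.
#
#     is_safe_to_remove("/config/custom_components/")            # -> False
#     is_safe_to_remove("/config/custom_components/some_repo/")  # -> True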
# -*- coding: utf-8 -*-
import logging
import os
import sys
import time
from pythonjsonlogger.jsonlogger import JsonFormatter
from sanic.log import DefaultFilter
import ujson
from jussi.typedefs import WebApp
LOG_DATETIME_FORMAT = r'%Y-%m-%dT%H:%M:%S.%s%Z'
os.environ['TZ'] = 'UTC'
time.tzset()
# JsonFormatter.converter = time.gmtime
SUPPORTED_LOG_MESSAGE_KEYS = (
'levelname',
'asctime',
# 'created',
# 'filename',
# 'levelno',
# 'module',
'funcName',
'lineno',
'msecs',
'message',
'name',
'timestamp',
'severity'
# 'pathname',
# 'process',
# 'processName',
# 'relativeCreated',
# 'thread',
# 'threadName'
)
JSON_LOG_FORMAT = ' '.join(
['%({0:s})'.format(i) for i in SUPPORTED_LOG_MESSAGE_KEYS])
class CustomJsonFormatter(JsonFormatter):
def add_fields(self, log_record, record, message_dict):
super(
CustomJsonFormatter,
self).add_fields(
log_record,
record,
message_dict)
if getattr(record, 'asctime', None):
log_record['timestamp'] = record.asctime
if 'asctime' in log_record:
del log_record['asctime']
if getattr(record, 'levelname', None):
log_record['severity'] = record.levelname
if 'levelname' in log_record:
del log_record['levelname']
# pylint: disable=no-self-use
def _jsonify_log_record(self, log_record):
"""Returns a json string of the log record."""
return ujson.dumps(log_record)
LOGGING = {
'version': 1,
'filters': {
'accessFilter': {
'()': DefaultFilter,
'param': [0, 10, 20]
},
'errorFilter': {
'()': DefaultFilter,
'param': [30, 40, 50]
}
},
'formatters': {
'simple': {
'()': CustomJsonFormatter,
'format': '%(asctime)s %(name) %(levelname) %(message)',
'datefmt': LOG_DATETIME_FORMAT,
'json_indent': None
},
'json_access': {
'()': CustomJsonFormatter,
'format':
'%(asctime) %(name) %(levelname) %(host) ' +
'%(request) %(message) %(status) %(byte)',
'datefmt': LOG_DATETIME_FORMAT,
'json_indent': None
},
'json_request': {
'()': CustomJsonFormatter,
'format': '%(asctime)s',
},
'json': {
'()': CustomJsonFormatter,
'format': JSON_LOG_FORMAT,
'datefmt': LOG_DATETIME_FORMAT,
'json_indent': None
}
},
'handlers': {
'internal': {
'class': 'logging.StreamHandler',
'filters': ['accessFilter'],
'formatter': 'simple',
'stream': sys.stderr
},
'accessStream': {
'class': 'logging.StreamHandler',
'filters': ['accessFilter'],
'formatter': 'json_access',
'stream': sys.stderr
},
'errorStream': {
'class': 'logging.StreamHandler',
'filters': ['errorFilter'],
'formatter': 'simple',
'stream': sys.stderr
},
'jussiStdOut': {
'class': 'logging.StreamHandler',
'formatter': 'json'
},
'jussiRequest': {
'class': 'logging.StreamHandler',
'formatter': 'json_request'
}
},
'loggers': {
'sanic': {
'level': logging.INFO,
'handlers': ['errorStream']
},
'network': {
'level': logging.INFO,
'handlers': []
},
'jussi': {
'level': logging.INFO,
'handlers': ['jussiStdOut']
},
'jussi_debug': {
'level': logging.INFO,
'handlers': ['jussiStdOut']
},
'jussi_request': {
'level': logging.INFO,
'handlers': ['jussiRequest']
},
}
}
def setup_logging(app: WebApp, log_level: str = None) -> WebApp:
LOG_LEVEL = log_level or getattr(logging, os.environ.get('LOG_LEVEL', 'INFO'))
LOGGING['loggers']['sanic']['level'] = LOG_LEVEL
LOGGING['loggers']['network']['level'] = LOG_LEVEL
LOGGING['loggers']['jussi']['level'] = LOG_LEVEL
LOGGING['loggers']['jussi_debug']['level'] = os.environ.get(
'REQUEST_LOG_LEVEL', logging.INFO)
LOGGING['loggers']['jussi_request']['level'] = LOG_LEVEL
logger = logging.getLogger('jussi')
logger.info('configuring jussi logger')
app.config.logger = logger
return app
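# Illustrative sketch, not part of the original module: the LOGGING dict above
# follows the stdlib logging.config schema, so it could be applied outside of
# setup_logging() as shown below. Whether jussi applies it exactly this way is
# an assumption.
def apply_logging_config_sketch():
    import logging.config
    logging.config.dictConfig(LOGGING)
    return logging.getLogger('jussi')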
"""InfluxDBClient is client for API defined in https://github.com/influxdata/influxdb/blob/master/http/swagger.yml."""
from __future__ import absolute_import
import configparser
import os
import base64
from influxdb_client import Configuration, ApiClient, HealthCheck, HealthService, Ready, ReadyService
from influxdb_client.client.authorizations_api import AuthorizationsApi
from influxdb_client.client.bucket_api import BucketsApi
from influxdb_client.client.delete_api import DeleteApi
from influxdb_client.client.labels_api import LabelsApi
from influxdb_client.client.organizations_api import OrganizationsApi
from influxdb_client.client.query_api import QueryApi, QueryOptions
from influxdb_client.client.tasks_api import TasksApi
from influxdb_client.client.users_api import UsersApi
from influxdb_client.client.write_api import WriteApi, WriteOptions, PointSettings
class InfluxDBClient(object):
"""InfluxDBClient is client for InfluxDB v2."""
def __init__(self, url, token, debug=None, timeout=10_000, enable_gzip=False, org: str = None,
default_tags: dict = None, **kwargs) -> None:
"""
Initialize defaults.
:param url: InfluxDB server API url (ex. http://localhost:8086).
:param token: auth token
:param debug: enable verbose logging of http requests
:param timeout: HTTP client timeout setting for a request specified in milliseconds.
If one number provided, it will be total request timeout.
It can also be a pair (tuple) of (connection, read) timeouts.
        :param enable_gzip: Enable Gzip compression for http requests. Currently only the "Write" and "Query"
            endpoints support the Gzip compression.
:param org: organization name (used as a default in query and write API)
:key bool verify_ssl: Set this to false to skip verifying SSL certificate when calling API from https server.
:key str ssl_ca_cert: Set this to customize the certificate file to verify the peer.
:key str proxy: Set this to configure the http proxy to be used (ex. http://localhost:3128)
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
authentication.
:key int connection_pool_maxsize: Number of connections to save that can be reused by urllib3.
Defaults to "multiprocessing.cpu_count() * 5".
:key urllib3.util.retry.Retry retries: Set the default retry strategy that is used for all HTTP requests
except batching writes. As a default there is no one retry strategy.
:key bool auth_basic: Set this to true to enable basic authentication when talking to a InfluxDB 1.8.x that
does not use auth-enabled but is protected by a reverse proxy with basic authentication.
(defaults to false, don't set to true when talking to InfluxDB 2)
:key list[str] profilers: list of enabled Flux profilers
"""
self.url = url
self.token = token
self.org = org
self.default_tags = default_tags
conf = _Configuration()
if self.url.endswith("/"):
conf.host = self.url[:-1]
else:
conf.host = self.url
conf.enable_gzip = enable_gzip
conf.debug = debug
conf.verify_ssl = kwargs.get('verify_ssl', True)
conf.ssl_ca_cert = kwargs.get('ssl_ca_cert', None)
conf.proxy = kwargs.get('proxy', None)
conf.proxy_headers = kwargs.get('proxy_headers', None)
conf.connection_pool_maxsize = kwargs.get('connection_pool_maxsize', conf.connection_pool_maxsize)
conf.timeout = timeout
auth_token = self.token
auth_header_name = "Authorization"
auth_header_value = "Token " + auth_token
auth_basic = kwargs.get('auth_basic', False)
if auth_basic:
auth_header_value = "Basic " + base64.b64encode(token.encode()).decode()
retries = kwargs.get('retries', False)
self.profilers = kwargs.get('profilers', None)
self.api_client = ApiClient(configuration=conf, header_name=auth_header_name,
header_value=auth_header_value, retries=retries)
def __enter__(self):
"""
Enter the runtime context related to this object.
It will bind this method’s return value to the target(s)
specified in the `as` clause of the statement.
return: self instance
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Exit the runtime context related to this object and close the client."""
self.close()
@classmethod
def from_config_file(cls, config_file: str = "config.ini", debug=None, enable_gzip=False):
"""
        Configure client via configuration file. The configuration has to be under the 'influx2' section.
The supported formats:
- https://docs.python.org/3/library/configparser.html
- https://toml.io/en/
Configuration options:
- url
- org
- token
- timeout,
- verify_ssl
- ssl_ca_cert
- connection_pool_maxsize
- auth_basic
- profilers
- proxy
config.ini example::
[influx2]
url=http://localhost:8086
org=my-org
token=my-token
timeout=6000
connection_pool_maxsize=25
auth_basic=false
profilers=query,operator
proxy=http:proxy.domain.org:8080
[tags]
id = 132-987-655
customer = California Miner
data_center = ${env.data_center}
config.toml example::
[influx2]
url = "http://localhost:8086"
token = "my-token"
org = "my-org"
timeout = 6000
connection_pool_maxsize = 25
auth_basic = false
profilers="query, operator"
proxy = "http://proxy.domain.org:8080"
[tags]
id = "132-987-655"
customer = "California Miner"
data_center = "${env.data_center}"
"""
config = configparser.ConfigParser()
config.read(config_file)
def config_value(key: str):
return config['influx2'][key].strip('"')
url = config_value('url')
token = config_value('token')
timeout = None
if config.has_option('influx2', 'timeout'):
timeout = config_value('timeout')
org = None
if config.has_option('influx2', 'org'):
org = config_value('org')
verify_ssl = True
if config.has_option('influx2', 'verify_ssl'):
verify_ssl = config_value('verify_ssl')
ssl_ca_cert = None
if config.has_option('influx2', 'ssl_ca_cert'):
ssl_ca_cert = config_value('ssl_ca_cert')
connection_pool_maxsize = None
if config.has_option('influx2', 'connection_pool_maxsize'):
connection_pool_maxsize = config_value('connection_pool_maxsize')
auth_basic = False
if config.has_option('influx2', 'auth_basic'):
auth_basic = config_value('auth_basic')
default_tags = None
if config.has_section('tags'):
tags = {k: v.strip('"') for k, v in config.items('tags')}
default_tags = dict(tags)
profilers = None
if config.has_option('influx2', 'profilers'):
profilers = [x.strip() for x in config_value('profilers').split(',')]
proxy = None
if config.has_option('influx2', 'proxy'):
proxy = config_value('proxy')
return cls(url, token, debug=debug, timeout=_to_int(timeout), org=org, default_tags=default_tags,
enable_gzip=enable_gzip, verify_ssl=_to_bool(verify_ssl), ssl_ca_cert=ssl_ca_cert,
connection_pool_maxsize=_to_int(connection_pool_maxsize), auth_basic=_to_bool(auth_basic),
profilers=profilers, proxy=proxy)
@classmethod
def from_env_properties(cls, debug=None, enable_gzip=False):
"""
Configure client via environment properties.
Supported environment properties:
- INFLUXDB_V2_URL
- INFLUXDB_V2_ORG
- INFLUXDB_V2_TOKEN
- INFLUXDB_V2_TIMEOUT
- INFLUXDB_V2_VERIFY_SSL
- INFLUXDB_V2_SSL_CA_CERT
- INFLUXDB_V2_CONNECTION_POOL_MAXSIZE
- INFLUXDB_V2_AUTH_BASIC
"""
url = os.getenv('INFLUXDB_V2_URL', "http://localhost:8086")
token = os.getenv('INFLUXDB_V2_TOKEN', "my-token")
timeout = os.getenv('INFLUXDB_V2_TIMEOUT', "10000")
org = os.getenv('INFLUXDB_V2_ORG', "my-org")
verify_ssl = os.getenv('INFLUXDB_V2_VERIFY_SSL', "True")
ssl_ca_cert = os.getenv('INFLUXDB_V2_SSL_CA_CERT', None)
connection_pool_maxsize = os.getenv('INFLUXDB_V2_CONNECTION_POOL_MAXSIZE', None)
auth_basic = os.getenv('INFLUXDB_V2_AUTH_BASIC', "False")
prof = os.getenv("INFLUXDB_V2_PROFILERS", None)
profilers = None
if prof is not None:
profilers = [x.strip() for x in prof.split(',')]
default_tags = dict()
for key, value in os.environ.items():
if key.startswith("INFLUXDB_V2_TAG_"):
default_tags[key[16:].lower()] = value
return cls(url, token, debug=debug, timeout=_to_int(timeout), org=org, default_tags=default_tags,
enable_gzip=enable_gzip, verify_ssl=_to_bool(verify_ssl), ssl_ca_cert=ssl_ca_cert,
connection_pool_maxsize=_to_int(connection_pool_maxsize), auth_basic=_to_bool(auth_basic),
profilers=profilers)
def write_api(self, write_options=WriteOptions(), point_settings=PointSettings()) -> WriteApi:
"""
Create a Write API instance.
:param point_settings:
:param write_options: write api configuration
:return: write api instance
"""
return WriteApi(influxdb_client=self, write_options=write_options, point_settings=point_settings)
def query_api(self, query_options: QueryOptions = QueryOptions()) -> QueryApi:
"""
Create a Query API instance.
:param query_options: optional query api configuration
:return: Query api instance
"""
return QueryApi(self, query_options)
def close(self):
"""Shutdown the client."""
self.__del__()
def __del__(self):
"""Shutdown the client."""
if self.api_client:
self.api_client.__del__()
self.api_client = None
def buckets_api(self) -> BucketsApi:
"""
Create the Bucket API instance.
:return: buckets api
"""
return BucketsApi(self)
def authorizations_api(self) -> AuthorizationsApi:
"""
Create the Authorizations API instance.
:return: authorizations api
"""
return AuthorizationsApi(self)
def users_api(self) -> UsersApi:
"""
Create the Users API instance.
:return: users api
"""
return UsersApi(self)
def organizations_api(self) -> OrganizationsApi:
"""
Create the Organizations API instance.
:return: organizations api
"""
return OrganizationsApi(self)
def tasks_api(self) -> TasksApi:
"""
Create the Tasks API instance.
:return: tasks api
"""
return TasksApi(self)
def labels_api(self) -> LabelsApi:
"""
Create the Labels API instance.
:return: labels api
"""
return LabelsApi(self)
def health(self) -> HealthCheck:
"""
Get the health of an instance.
:return: HealthCheck
"""
health_service = HealthService(self.api_client)
try:
health = health_service.get_health()
return health
except Exception as e:
return HealthCheck(name="influxdb", message=str(e), status="fail")
def ready(self) -> Ready:
"""
Get The readiness of the InfluxDB 2.0.
:return: Ready
"""
ready_service = ReadyService(self.api_client)
return ready_service.get_ready()
def delete_api(self) -> DeleteApi:
"""
Get the delete metrics API instance.
:return: delete api
"""
return DeleteApi(self)
class _Configuration(Configuration):
def __init__(self):
Configuration.__init__(self)
self.enable_gzip = False
def update_request_header_params(self, path: str, params: dict):
super().update_request_header_params(path, params)
if self.enable_gzip:
# GZIP Request
if path == '/api/v2/write':
params["Content-Encoding"] = "gzip"
params["Accept-Encoding"] = "identity"
pass
# GZIP Response
if path == '/api/v2/query':
# params["Content-Encoding"] = "gzip"
params["Accept-Encoding"] = "gzip"
pass
pass
pass
def update_request_body(self, path: str, body):
_body = super().update_request_body(path, body)
if self.enable_gzip:
# GZIP Request
if path == '/api/v2/write':
import gzip
if isinstance(_body, bytes):
return gzip.compress(data=_body)
else:
return gzip.compress(bytes(_body, "utf-8"))
return _body
def _to_bool(bool_value):
return str(bool_value).lower() in ("yes", "true")
def _to_int(int_value):
return int(int_value) if int_value is not None else None
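# Illustrative usage sketch, not part of the module. The URL, token, org and
# bucket names below are placeholders; Point and SYNCHRONOUS are part of the
# public influxdb_client package.
def _influxdb_usage_sketch():
    from influxdb_client import Point
    from influxdb_client.client.write_api import SYNCHRONOUS

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        write_api = client.write_api(write_options=SYNCHRONOUS)
        write_api.write(bucket="my-bucket", record=Point("measurement").field("value", 1.0))
        return client.query_api().query('from(bucket: "my-bucket") |> range(start: -1h)')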
from django.conf import settings
from telegram import Bot
from telegram.ext import (
Dispatcher,
CommandHandler,
MessageHandler,
Filters,
CallbackQueryHandler,
ConversationHandler
)
from .commands.menu import (
claim, chat_support, auth, faq, personal_account
)
from .commands.menu.auth import PHONE, ADDRESS, phone, cancel, address
from .commands.menu.claim import CITY_CLAIM, ADDRESS_CLAIM, cancel_claim, address_claim, city_claim, phone_claim, \
PHONE_CLAIM
from .commands import base_commands
from .commands import admin_commands
from .commands.admin_commands import TEXT, USER_LIST
BACK = 'back'
def setup():
tgbot = Bot(settings.TELEGRAM_BOT_TOKEN)
if settings.TELEGRAM_BOT_WEBHOOK_ENABLED:
tgbot.set_webhook(
settings.TELEGRAM_BOT_WEBHOOK_URL + settings.TELEGRAM_BOT_WEBHOOK_PATH
)
dp = Dispatcher(tgbot, None)
# Authorization
conv_handler_auth = ConversationHandler(
entry_points=[MessageHandler(Filters.regex('👤 Особистий кабінет'), auth)],
states={
PHONE: [MessageHandler(Filters.contact, phone)],
ADDRESS: [CallbackQueryHandler(address)],
},
fallbacks=[CommandHandler('cancel', cancel)],
)
# Claim
conv_handler_claim = ConversationHandler(
entry_points=[MessageHandler(Filters.regex('🔌 Заявка на підключення'), claim)],
states={
CITY_CLAIM: [
MessageHandler(Filters.text(['📍 Кропивницький', '📍 Знам`янка']), city_claim, pass_user_data=True)],
ADDRESS_CLAIM: [MessageHandler(Filters.all, address_claim, pass_user_data=True)],
PHONE_CLAIM: [MessageHandler(Filters.contact, phone_claim, pass_user_data=True)],
},
fallbacks=[CommandHandler('cancel', cancel_claim)],
)
# Mailing to bot users
conv_handler_mailing = ConversationHandler(
entry_points=[CommandHandler('mailing_message', admin_commands.mailing_message)],
states={
TEXT: [MessageHandler(Filters.all, admin_commands.text_message, pass_user_data=True)],
USER_LIST: [MessageHandler(Filters.all, admin_commands.mailing_start, pass_user_data=True)],
},
fallbacks=[CommandHandler('cancel', admin_commands.cancel_mailing)],
)
# HANDLERS ADD -------------------------
# Base commands
dp.add_handler(CommandHandler('start', base_commands.command_start))
dp.add_handler(CommandHandler('help', base_commands.command_help))
# Conversations
dp.add_handler(conv_handler_auth) # Authorization
dp.add_handler(conv_handler_claim) # Claims
# admin commands
dp.add_handler(CommandHandler('admin', admin_commands.admin_help))
dp.add_handler(conv_handler_mailing) # Mailing to all bot users
# FAQ
dp.add_handler(CommandHandler('faq', faq))
dp.add_handler(MessageHandler(Filters.regex('💡 F.A.Q'), faq))
dp.add_handler(CallbackQueryHandler(base_commands.inline_button)) # pattern='main'
dp.add_handler(MessageHandler(Filters.regex('✉️ Чат з оператором'), chat_support))
# User menu
dp.add_handler(MessageHandler(Filters.regex('👁 Інформація про користувача'), personal_account.user_info))
dp.add_handler(MessageHandler(Filters.regex('🌐 Мій тариф'), personal_account.tariff_plan))
dp.add_handler(MessageHandler(Filters.regex('📺 Телебачення'), personal_account.tv_tariff_plan))
dp.add_handler(MessageHandler(Filters.regex('💳 Фінансові операції'), personal_account.financial_operations_info))
# dp.add_handler(MessageHandler(Filters.all, base_commands.unknown))
dp.add_error_handler(base_commands.error_handler)
# ------------------------------------
return tgbot, dp
bot, dispatcher = setup()
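# Illustrative sketch, not part of the original module: with webhooks enabled,
# the web framework has to hand incoming JSON payloads to the dispatcher.
# Update.de_json() and Dispatcher.process_update() are standard
# python-telegram-bot calls; the function shape itself is hypothetical.
def _process_webhook_payload_sketch(payload: dict):
    from telegram import Update
    update = Update.de_json(payload, bot)
    dispatcher.process_update(update)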
"""Walk result example for a single timeserie"""
import lisptick
HOST = "uat.lisptick.org"
PORT = 12006
def main():
"""Ask for temperature at Poitiers airport"""
conn = lisptick.Socket(HOST, PORT)
request = """(timeserie @"t" "meteonet" "86027001" 2017-07-06)"""
# call show_value for each value one by one, as soon as it arrives
conn.walk_result(request, print_value)
def print_value(_, __, value):
"""reader and uid are useless as result is a single timeserie"""
print(value)
if __name__ == "__main__":
main()
'''
================================================
DOWNLOAD_AUDIOSET REPOSITORY
================================================
repository name: download_audioset
repository version: 1.0
repository link: https://github.com/jim-schwoebel/download_audioset
author: Jim Schwoebel
author contact: [email protected]
description: downloads the raw audio files from AudioSet (released by Google).
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-11-08
This code (download_audioset) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
SPECIAL NOTES
================================================
This script parses through the entire balanced audioset dataset and downloads
all the raw audio files. The files are arranged in folders according to their
representative classes.
Please ensure that you have roughly 35GB of free space on your computer before
downloading the files. Note that it may take up to 2 days to fully download
all the files.
Enjoy! - :)
-Jim
================================================
LICENSE TERMS
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
SERVICE STATEMENT
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ [email protected].
'''
################################################################################
## IMPORT STATEMENTS ##
################################################################################
import pafy, os, shutil, time, ffmpy
from natsort import natsorted
import pandas as pd
import soundfile as sf
from tqdm import tqdm
################################################################################
## HELPER FUNCTIONS ##
################################################################################
#function to clean labels
def convertlabels(sortlist,labels,textlabels):
clabels=list()
try:
index=labels.index(sortlist)
clabel=textlabels[index]
#pull out converted label
clabels.append(clabel)
except:
clabels=[]
return clabels
def download_audio(link):
listdir=os.listdir()
os.system("youtube-dl -f 'bestaudio[ext=m4a]' '%s'"%(link))
listdir2=os.listdir()
filename=''
for i in range(len(listdir2)):
if listdir2[i] not in listdir and listdir2[i].endswith('.m4a'):
filename=listdir2[i]
break
return filename
################################################################################
## MAIN SCRIPT ##
################################################################################
defaultdir=os.getcwd()
os.chdir(defaultdir)
#load labels of the videos
#number, label, words
loadfile=pd.read_excel('labels.xlsx')
number=loadfile.iloc[:,0].tolist()
labels=loadfile.iloc[:,1].tolist()
textlabels=loadfile.iloc[:,2].tolist()
#remove spaces for folders
for i in range(len(textlabels)):
textlabels[i]=textlabels[i].replace(' ','')
#now load data for youtube
loadfile2=pd.read_excel('unbalanced_train_segments.xlsx')
# ylabels have to be cleaned to make a good list (CSV --> LIST)
yid=loadfile2.iloc[:,0].tolist()[2:]
ystart=loadfile2.iloc[:,1].tolist()[2:]
yend=loadfile2.iloc[:,2].tolist()[2:]
ylabels=loadfile2.iloc[:,3].tolist()[2:]
print(set(ylabels))
#make folders
try:
defaultdir2=os.getcwd()+'/audiosetdata/'
os.chdir(os.getcwd()+'/audiosetdata')
except:
defaultdir2=os.getcwd()+'/audiosetdata/'
os.mkdir(os.getcwd()+'/audiosetdata')
os.chdir(os.getcwd()+'/audiosetdata')
existing_wavfiles=list()
for i in range(len(textlabels)):
try:
os.mkdir(textlabels[i])
except:
os.chdir(textlabels[i])
listdir=os.listdir()
for j in range(len(listdir)):
if listdir[j].endswith('.wav'):
existing_wavfiles.append(listdir[j])
os.chdir(defaultdir2)
# get the last downloaded file (checkpoint) so the script can resume where it left off
existing_wavfiles=natsorted(existing_wavfiles)
print(existing_wavfiles)
try:
lastfile=int(existing_wavfiles[-1][7:][0:-4])
except:
lastfile=0
#iterate through the entire segment list; skip clips already downloaded, otherwise download, convert, and snip each one
slink='https://www.youtube.com/watch?v='
for i in tqdm(range(len(yid))):
if i < lastfile:
print('skipping, already downloaded file...')
else:
link=slink+yid[i]
start=float(ystart[i])
end=float(yend[i])
print(ylabels[i])
clabels=convertlabels(ylabels[i],labels,textlabels)
print(clabels)
if clabels != []:
#change to the right directory
newdir=defaultdir2+clabels[0]+'/'
os.chdir(newdir)
#if this clip has not been snipped yet, download and process it
lastdir=os.getcwd()+'/'
if 'snipped'+str(i)+'.wav' not in os.listdir():
try:
# use YouTube DL to download audio
filename=download_audio(link)
extension='.m4a'
#get file extension and convert to .wav for processing later
os.rename(filename,'%s%s'%(str(i),extension))
filename='%s%s'%(str(i),extension)
if extension not in ['.wav']:
xindex=filename.find(extension)
filename=filename[0:xindex]
ff=ffmpy.FFmpeg(
inputs={filename+extension:None},
outputs={filename+'.wav':None}
)
ff.run()
os.remove(filename+extension)
file=filename+'.wav'
data,samplerate=sf.read(file)
totalframes=len(data)
totalseconds=totalframes/samplerate
startsec=start
startframe=samplerate*startsec
endsec=end
endframe=samplerate*endsec
# print(startframe)
# print(endframe)
sf.write('snipped'+file, data[int(startframe):int(endframe)], samplerate)
snippedfile='snipped'+file
os.remove(file)
except:
print('no urls')
#sleep 2 seconds between downloads to prevent the IP from getting banned
time.sleep(2)
else:
print('skipping, already downloaded file...')
| python | 8,155 |
import copy
def dict_merge(dict1, dict2):
"""
recursive update (not in-place).
dict2 has precedence for equal keys.
"""
dict1 = copy.deepcopy(dict1)
for key in dict2:
val = dict2[key]
if type(val) is dict:
# merge dictionaries
if key in dict1 and type(dict1[key]) is dict:
dict1[key] = dict_merge(dict1[key], val)
else:
dict1[key] = val
else:
dict1[key] = val
return dict1
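# A minimal usage sketch with hypothetical values, illustrating that dict2 wins
# on key collisions and that nested dicts are merged rather than replaced:
#
#   base     = {"a": 1, "nested": {"x": 1, "y": 2}}
#   override = {"a": 9, "nested": {"y": 20, "z": 30}}
#   dict_merge(base, override)
#   # -> {"a": 9, "nested": {"x": 1, "y": 20, "z": 30}}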
| python | 509 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# MIT: https://opensource.org/licenses/MIT
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Sebastian Werner (wpbasti)
# * Andreas Ecker (ecker)
# * Fabian Jakobs (fjakobs)
#
################################################################################
##
# MODULE DESCRIPTION
#
# api.py -- Generates a tree of documentation nodes from a JavaScript syntax
# tree, walking the syntax tree and picking out ecmascript.frontend.
# comment nodes; uses ecmascript.frontend.tree.Node for the tree;
# creates a suitable tree structure to hold the individual JSDoc
# comments (which are -unfortunately- formatted in e.f.comment into
# HTML).
##
import sys, os, re, string, copy
from ecmascript.frontend import tree, Comment, lang
from ecmascript.frontend import treeutil
from ecmascript.frontend import treegenerator
from ecmascript.frontend.treegenerator import PackerFlags as pp
from ecmascript.transform.optimizer import variantoptimizer # ugly here
from generator import Context
########################################################################################
#
# MAIN
#
########################################################################################
class DocException (Exception):
def __init__ (self, msg, syntaxItem):
Exception.__init__(self, msg)
self.node = syntaxItem
def createDoc(syntaxTree, docTree = None):
if not docTree:
docTree = tree.Node("doctree")
attachMap = {} # {"targetclass" : {"statics"/"members" : {target_method : method_docnode}}}
defineNode = treeutil.findQxDefine(syntaxTree)
if defineNode != None:
variant = treeutil.selectNode(defineNode, "operand").toJS(pp).split(".")[1].lower() # 'class' in 'qx.Class.define'
handleClassDefinition(docTree, defineNode, variant)
attachMap = findAttachMethods(docTree)
ret = (docTree, False, attachMap)
return ret
def createPackageDoc(text, packageName, docTree = None):
if not docTree:
docTree = tree.Node("doctree")
package = getPackageNode(docTree, packageName)
commentAttributes = Comment.Comment(text).parse(want_errors=True)
# check for JSDoc issues (no filtering)
for attrib in commentAttributes:
if 'error' in attrib:
lineno = attrib['line'] # assume the comment text is the only contents of the package odc
msg = "%s (%s): %s" % (packageName, lineno, attrib['message'])
msg += (": %s" % attrib['text']) if 'text' in attrib and attrib['text'] else ''
Context.console.warn(msg)
# Read description, see attributes
for attrib in commentAttributes:
# Add description
if attrib["category"] == "description":
package = addChildIf(package, *(handleJSDocDecsription(attrib)))
elif attrib["category"] == "see":
package = addChildIf(package, *(handleJSDocSee(attrib)))
return docTree
def handleClassDefinition(docTree, callNode, variant):
params = callNode.getChild("arguments")
className = params.children[0].get("value")
if len(params.children) > 1:
classMap = params.children[1]
else:
classMap = {}
cls_cmnt_node = treeutil.findLeftmostChild(callNode.getChild("operand"))
commentAttributes = Comment.parseNode(cls_cmnt_node)[-1]
classNode = classNodeFromDocTree(docTree, className, commentAttributes)
if variant == "class":
classNode.set("type", "class")
type = treeutil.selectNode(params, "2/keyvalue[@key='type']/value/constant/@value")
if type == "singleton":
classNode.set("isSingleton", True)
elif type == "abstract":
classNode.set("isAbstract", True)
else:
classNode.set("type", variant)
handleDeprecated(classNode, commentAttributes)
handleAccess(classNode, commentAttributes)
handleChildControls(callNode, classNode, className, commentAttributes)
try:
children = classMap.children
except AttributeError:
return
for keyvalueItem in children:
if keyvalueItem.type != "keyvalue":
continue
key = keyvalueItem.get("key")
valueItem = keyvalueItem.getChild("value").getFirstChild()
# print "KEY: %s = %s" % (key, valueItem.type)
if key == "extend":
if variant in ("class", "bootstrap"):
handleClassExtend(valueItem, classNode, docTree, className)
elif variant == "interface":
handleInterfaceExtend(valueItem, classNode, docTree, className)
elif key == "include":
handleMixins(valueItem, classNode, docTree, className)
elif key == "implement":
handleInterfaces(valueItem, classNode, docTree)
elif key == "construct":
handleConstructor(valueItem, classNode)
elif key == "statics":
handleStatics(valueItem, classNode)
elif key == "properties":
handleProperties(valueItem, classNode)
elif key == "members":
handleMembers(valueItem, classNode)
elif key == "events":
handleEvents(valueItem, classNode)
handleSingleton(classNode, docTree)
if not classNode.hasChild("desc"):
addError(classNode, "Class documentation is missing.", callNode)
def handleClassExtend(valueItem, classNode, docTree, className):
superClassName = (treeutil.assembleVariable(valueItem))[0]
superClassNode = classNodeFromDocTree(docTree, superClassName)
childClasses = superClassNode.get("childClasses", False)
if childClasses:
childClasses += "," + className
else:
childClasses = className
superClassNode.set("childClasses", childClasses)
classNode.set("superClass", superClassName)
def handleInterfaceExtend(valueItem, classNode, docTree, className):
superInterfaceNames = treeutil.variableOrArrayNodeToArray(valueItem)
for superInterface in superInterfaceNames:
superInterfaceNode = classNodeFromDocTree(docTree, superInterface)
childInterfaces = superInterfaceNode.get("childClasses", False)
if childInterfaces:
childInterfaces += "," + className
else:
childInterfaces = className
superInterfaceNode.set("childClasses", childInterfaces)
node = tree.Node("class")
node.set("type", "interface")
node.set("name", superInterface)
packageName = superInterface[:superInterface.rindex(".")]
node.set("packageName", packageName)
classNode.addListChild("superInterfaces", node)
#superInterfaceNode.type = "interface"
#classNode.addListChild("superInterfaces", superInterfaceNode)
# example for string-valued attributes["superInterfaces"] property
#superInterfaces = classNode.get("superInterfaces", False)
#if superInterfaces:
# superInterfaces += "," + superInterface
#else:
# superInterfaces = superInterface
#classNode.set("superInterfaces", superInterfaces)
return
def handleMixins(item, classNode, docTree, className):
try:
# direct symbol or list of symbols
mixins = treeutil.variableOrArrayNodeToArray(item)
except tree.NodeAccessException:
try:
# call to qx.core.Environment.filter
filterMap = variantoptimizer.getFilterMap(item, classNode.get("fullName"))
assert filterMap
includeSymbols = []
for key, node in filterMap.items():
# to select the current environment variant, add something like:
# if key not in variants or (key in variants and bool(variants[key]):
# map value has to be variable
variable = node.children[0]
assert variable.isVar()
symbol, isComplete = treeutil.assembleVariable(variable)
assert isComplete
includeSymbols.append(symbol)
mixins = includeSymbols
except AssertionError:
Context.console.warn("Illegal include definition in " + classNode.get("fullName"))
return
for mixin in mixins:
mixinNode = classNodeFromDocTree(docTree, mixin)
includer = mixinNode.get("includer", False)
if includer:
includer += "," + className
else:
includer = className
mixinNode.set("includer", includer)
classNode.set("mixins", ",".join(mixins))
def handleSingleton(classNode, docTree):
if classNode.get("isSingleton", False) == True:
className = classNode.get("fullName")
functionCode = ("/**\n"
"* Returns a singleton instance of this class. On the first call the class\n"
"* is instantiated by calling the constructor with no arguments. All following\n"
"* calls will return this instance.\n"
"*\n"
'* This method has been added by setting the "type" key in the class definition\n'
'* ({@link qx.Class#define}) to "singleton".\n'
"*\n"
"* @return {%s} The singleton instance of this class.\n"
"*/\n"
"function() {}") % className
node = treeutil.compileString(functionCode)
commentAttributes = Comment.parseNode(node)[-1]
docNode = handleFunction(node, "getInstance", commentAttributes, classNode)
docNode.set("isStatic", True)
classNode.addListChild("methods-static", docNode)
def handleInterfaces(item, classNode, docTree):
className = classNode.get("fullName")
try:
interfaces = treeutil.variableOrArrayNodeToArray(item)
except tree.NodeAccessException:
Context.console.warn("")
Context.console.warn("Illegal implement definition in " + classNode.get("fullName"))
return
for interface in interfaces:
interfaceNode = classNodeFromDocTree(docTree, interface)
impl = interfaceNode.get("implementations", False)
if impl:
impl += "," + className
else:
impl = className
interfaceNode.set("implementations", impl)
classNode.set("interfaces", ",".join(interfaces))
def handleConstructor(ctorItem, classNode):
if ctorItem and ctorItem.type == "function":
commentAttributes = Comment.parseNode(ctorItem.parent.parent)[-1]
ctor = handleFunction(ctorItem, "ctor", commentAttributes, classNode, reportMissingDesc=False)
ctor.set("isCtor", True)
classNode.addListChild("constructor", ctor)
def handleStatics(item, classNode):
for key, value in treeutil.mapNodeToMap(item).items():
keyvalue = value.parent
value = value.getFirstChild()
commentAttributes = Comment.parseNode(keyvalue)[-1]
# handle @signature
if value.type != "function":
for docItem in commentAttributes:
if docItem["category"] == "signature":
js_string = 'function(' + ",".join(docItem['arguments']) + '){}'
value = treeutil.compileString(js_string)
#TODO: Warn if syntax error
# Function
if value.type == "function":
node = handleFunction(value, key, commentAttributes, classNode)
node.set("isStatic", True)
if classNode.get("type", False) == "mixin":
node.set("isMixin", True)
classNode.addListChild("methods-static", node)
# Constant
elif not key[:2] == "$$":
handleConstantDefinition(keyvalue, classNode)
def handleMembers(item, classNode):
for key, value in treeutil.mapNodeToMap(item).items():
keyvalue = value.parent
value = value.getFirstChild()
commentAttributes = Comment.parseNode(keyvalue)[-1]
# handle @signature
signatureError = None
if value.type != "function":
for docItem in commentAttributes:
if docItem["category"] == "signature":
if "error" in docItem:
signatureError = "%s: %s" % (docItem["category"], docItem["message"])
value = treeutil.compileString('function(){}')
continue
js_string = 'function(' + ",".join(docItem['arguments']) + '){}'
value = treeutil.compileString(js_string)
if value.type == "function":
node = handleFunction(value, key, commentAttributes, classNode)
if classNode.get("type", False) == "mixin":
node.set("isMixin", True)
if signatureError:
addError(node, signatureError)
classNode.addListChild("methods", node)
def generatePropertyMethods(propertyName, classNode, generatedMethods):
if propertyName[:2] == "__":
access = "__"
name = propertyName[2:]
elif propertyName[:1] == "_":
access = "_"
name = propertyName[1:]
else:
access = ""
name = propertyName
name = name[0].upper() + name[1:]
propData = {
access + "set" + name : ("/**\n"
"* Sets the user value of the property <code>%s</code>.\n"
"*\n"
"* For further details take a look at the property definition: {@link #%s}.\n"
"*\n"
"* @param value {var} New value for property <code>%s</code>.\n"
"* @return {var} The unmodified incoming value.\n"
"*/\n"
"function (value) {}; ") % (propertyName, propertyName, propertyName),
access + "get" + name : ("/**\n"
"* Returns the (computed) value of the property <code>%s</code>.\n"
"*\n"
"* For further details take a look at the property definition: {@link #%s}.\n"
"*\n"
"* @return {var} (Computed) value of <code>%s</code>.\n"
"*/\n"
"function () {}; ") % (propertyName, propertyName, propertyName),
access + "reset" + name : ("/**\n"
"* Resets the user value of the property <code>%s</code>.\n"
"*\n"
"* The computed value falls back to the next available value e.g. appearance, init or\n"
"* inheritance value depeneding on the property configuration and value availability.\n"
"*\n"
"* For further details take a look at the property definition: {@link #%s}.\n"
"*/\n"
"function () {}; ") % (propertyName, propertyName),
access + "init" + name : ("/**\n"
"* Calls the apply method and dispatches the change event of the property <code>%s</code>\n"
"* with the default value defined by the class developer. This function can\n"
"* only be called from the constructor of a class.\n"
"*\n"
"* For further details take a look at the property definition: {@link #%s}.\n"
"*\n"
"* @protected\n"
"* @param value {var} Initial value for property <code>%s</code>.\n"
"* @return {var} the default value\n"
"*/\n"
"function (value) {}; ") % (propertyName, propertyName, propertyName),
access + "toggle" + name : ("/**\n"
"* Toggles the (computed) value of the boolean property <code>%s</code>.\n"
"*\n"
"* For further details take a look at the property definition: {@link #%s}.\n"
"*\n"
"* @return {Boolean} the new value\n"
"*/\n"
"function () {}; ") % (propertyName, propertyName),
access + "is" + name : ("/**\n"
"* Check whether the (computed) value of the boolean property <code>%s</code> equals <code>true</code>.\n"
"*\n"
"* For further details take a look at the property definition: {@link #%s}.\n"
"*\n"
"* @return {Boolean} Whether the property equals <code>true</code>.\n"
"*/\n"
"function () {}; ") % (propertyName, propertyName)
}
for funcName in generatedMethods:
funcName = access + funcName + name
functionCode = propData[funcName]
node = treeutil.compileString(functionCode)
node.getRoot().set('file', '|[email protected]|')
commentAttributes = Comment.parseNode(node)[-1]
docNode = handleFunction(node, funcName, commentAttributes, classNode, False, False)
docNode.remove("line")
docNode.set("fromProperty", propertyName)
classNode.addListChild("methods", docNode)
def handlePropertyDefinitionNew(propName, propDefinition, classNode):
node = tree.Node("property")
node.set("name", propName)
if "init" in propDefinition:
node.set("defaultValue", getValue(propDefinition["init"].getFirstChild()))
if "nullable" in propDefinition:
node.set("allowNull", propDefinition["nullable"].getChild("constant").get("value"))
if "inheritable" in propDefinition:
node.set("inheritable", propDefinition["inheritable"].getChild("constant").get("value"))
if "themeable" in propDefinition:
node.set("themeable", propDefinition["themeable"].getChild("constant").get("value"))
if "refine" in propDefinition:
refineValue = propDefinition["refine"].getChild("constant").get("value")
if refineValue == "true":
node.set("refine", "true")
if "apply" in propDefinition:
node.set("apply", propDefinition["apply"].getChild("constant").get("value"))
if "event" in propDefinition:
eventName = propDefinition["event"].getChild("constant").get("value")
node.set("event", eventName)
event = tree.Node("event")
event.set("name", eventName)
event.addChild(tree.Node("desc").set("text", "Fired on change of the property {@link #%s}." % propName))
typesNode = tree.Node("types")
event.addChild(typesNode)
itemNode = tree.Node("entry")
typesNode.addChild(itemNode)
itemNode.set("type", "qx.event.type.Data")
classNode.addListChild("events", event)
if "check" in propDefinition:
check = propDefinition["check"].getFirstChild()
if check.type == "array":
values = [getValue(arrayItem) for arrayItem in check.children]
node.set("possibleValues", ",".join(values))
elif check.type == "function":
node.set("check", "Custom check function.")
elif check.type == "constant":
# this can mean: type name or check expression
# test by parsing it
check_value = check.get("value")
check_tree = treegenerator.parse(check_value)
if check_tree.children[0].isVar(): # tree is (statements (...))
node.set("check", check_value) # type name
else: # don't dare to be more specific
#elif check_tree.type in ('operation', 'call'): # "value<=100", "qx.util.Validate.range(0,100)"
node.set("check", "Custom check function.") # that's good enough so the param type is set to 'var'
else:
addError(node, "Unknown property check value: '%s'" % check.type, propDefinition["check"])
return node
def generateGroupPropertyMethod(propertyName, groupMembers, mode, classNode):
if propertyName[:2] == "__":
access = "__"
functionName = propertyName[2:]
elif propertyName[:1] == "_":
access = "_"
functionName = propertyName[1:]
else:
access = ""
functionName = propertyName
functionName = access + "set" + functionName[0].upper() + functionName[1:]
functionTemplate = ("/**\n"
"* Sets the values of the property group <code>%(name)s</code>.\n"
"* %(modeDoc)s\n"
"* For further details take a look at the property definition: {@link #%(name)s}.\n"
"*\n"
"%(params)s\n"
"*/\n"
"function (%(paramList)s) {}; ")
paramsTemplate = " * @param %s {var} Sets the value of the property {@link #%s}."
paramsDef = [paramsTemplate % (name, name) for name in groupMembers]
if mode == "shorthand":
modeDoc = "\n * This setter supports a shorthand mode compatible with the way margins and paddins are set in CSS.\n *"
else:
modeDoc = ""
functionCode = functionTemplate % ({
"name" : propertyName,
"modeDoc" : modeDoc,
"params" : "\n".join(paramsDef),
"paramList" : ", ".join(groupMembers)
})
functionNode = treeutil.compileString(functionCode)
commentAttributes = Comment.parseNode(functionNode)[-1]
docNode = handleFunction(functionNode, functionName, commentAttributes, classNode)
docNode.set("fromProperty", propertyName)
classNode.addListChild("methods", docNode)
def handlePropertyGroup(propName, propDefinition, classNode):
node = tree.Node("property")
node.set("name", propName)
group = propDefinition["group"].getFirstChild()
groupMembers = [getValue(arrayItem) for arrayItem in group.children]
node.set("group", ",".join(groupMembers));
if "mode" in propDefinition:
node.set("mode", propDefinition["mode"].getChild("constant").get("value"))
if "themeable" in propDefinition:
node.set("themeable", propDefinition["themeable"].getChild("constant").get("value"))
return node
def handleProperties(item, classNode):
for propName, value in treeutil.mapNodeToMap(item).items():
keyvalue = value.parent
value = value.getFirstChild()
if value.type != "map":
continue
propDefinition = treeutil.mapNodeToMap(value)
#print propName, propDefinition
if "group" in propDefinition:
node = handlePropertyGroup(propName, propDefinition, classNode)
node.set("propertyType", "group")
groupMembers = [member[1:-1] for member in node.get("group").split(",")]
generateGroupPropertyMethod(propName, groupMembers, node.get("mode", False), classNode)
generatePropertyMethods(propName, classNode, ["reset"])
else:
node = handlePropertyDefinitionNew(propName, propDefinition, classNode)
node.set("propertyType", "new")
if node.get("refine", False) != "true":
generatePropertyMethods(propName, classNode, ["set", "get", "init", "reset"])
if node.get("check", False) == "Boolean":
generatePropertyMethods(propName, classNode, ["toggle", "is"])
if classNode.get("type", False) == "mixin":
node.set("isMixin", True)
commentAttributes = Comment.parseNode(keyvalue)[-1]
for attrib in commentAttributes:
addTypeInfo(node, attrib, item)
handleDeprecated(node, commentAttributes)
handleAccess(node, commentAttributes)
if not node.hasChild("desc"):
addError(node, "Documentation is missing.", item)
classNode.addListChild("properties", node)
def handleEvents(item, classNode):
for key, value_ in treeutil.mapNodeToMap(item).items():
keyvalue = value_.parent
value = value_.getFirstChild(True, True).toJavascript()
value = string.strip(value, '\'"') # unquote result from .toJavascript; TODO: unnecessary with .toJS!?
node = tree.Node("event")
commentAttributes = Comment.parseNode(keyvalue)[-1]
try:
desc = commentAttributes[0]["text"]
except (IndexError, KeyError):
desc = None
addError(node, "Documentation is missing.", item)
if desc != None:
node.addChild(tree.Node("desc").set("text", desc))
node.set("name", key)
typesNode = tree.Node("types")
node.addChild(typesNode)
itemNode = tree.Node("entry")
typesNode.addChild(itemNode)
itemNode.set("type", value)
handleDeprecated(node, commentAttributes)
handleAccess(node, commentAttributes)
classNode.addListChild("events", node)
def handleDeprecated(docNode, commentAttributes):
for docItem in commentAttributes:
if docItem["category"] == "deprecated":
deprecatedNode = tree.Node("deprecated")
if "text" in docItem:
descNode = tree.Node("desc").set("text", docItem["text"])
deprecatedNode.addChild(descNode)
docNode.addChild(deprecatedNode)
def handleAccess(docNode, commentAttributes):
name = docNode.get("name")
if name[:2] == "__":
access = "private"
elif name[:1] == "_":
access = "protected"
else:
access = "public"
for docItem in commentAttributes:
if docItem["category"] == "internal":
access = "internal"
docNode.set("isInternal", True)
elif docItem["category"] == "public":
access = "public"
elif docItem["category"] == "protected":
access = "protected"
elif docItem["category"] == "public":
access = "public"
if access != "public":
docNode.set("access", access)
def handleChildControls(item, classNode, className, commentAttributes):
for attrib in commentAttributes:
if attrib["category"] == "childControl":
if "error" in attrib:
msg = "%s: %s" % (attrib["category"], attrib["message"])
addError(classNode, msg, item)
if not "name" in attrib:
addError(classNode, "No name defined for child control.", item)
return
childControlName = attrib["name"]
childControlNode = tree.Node("childControl")
childControlNode.set("name", childControlName)
if not "type" in attrib:
addError(classNode, "No type defined for child control: '%s'." % childControlName, item)
addTypeInfo(childControlNode, attrib, item)
classNode.addListChild("childControls", childControlNode)
def handleConstantDefinition(item, classNode):
if (item.type == "assignment"):
# This is a "normal" constant definition
leftItem = item.getFirstListChild("left")
name = leftItem.children[len(leftItem.children) - 1].get("name")
valueNode = item.getChild("right")
elif (item.type == "keyvalue"):
# This is a constant definition of a map-style class (like qx.Const)
name = item.get("key")
valueNode = item.getChild("value")
node = tree.Node("constant")
node.set("name", name)
if valueNode.hasChild("constant"):
node.set("value", valueNode.getChild("constant").get("value"))
node.set("type", valueNode.getChild("constant").get("constantType").capitalize())
elif valueNode.hasChild("array"):
arrayNode = valueNode.getChild("array")
if all([x.type == "constant" for x in arrayNode.children]):
node.set("value", arrayNode.toJS(pp))
node.set("type", "Array")
commentAttributes = Comment.parseNode(item)[-1]
for attr in commentAttributes:
addTypeInfo(node, attr, item)
handleDeprecated(node, commentAttributes)
handleAccess(node, commentAttributes)
classNode.addListChild("constants", node)
def getReturnNodes(parent):
returnNodes = []
def getReturnNode(parent):
for node in parent.getChildren():
if node.type == "return":
returnNodes.append(node)
continue
if node.type == "function":
continue
if len(node.getChildren()) > 0:
getReturnNode(node)
getReturnNode(parent)
return returnNodes
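# getReturnNodes collects the return statements of a function body while
# skipping nested function expressions. As a hypothetical sketch, for the
# JavaScript source
#
#   function() { if (a) { return 1; } var f = function() { return 2; }; }
#
# only the node for "return 1" is collected; "return 2" belongs to the nested
# function and is ignored.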
def handleFunction(funcItem, name, commentAttributes, classNode, reportMissingDesc=True, checkReturn=True):
node = tree.Node("method")
node.set("name", name)
(line, column) = treeutil.getLineAndColumnFromSyntaxItem(funcItem)
if line:
node.set("line", line)
# Read the parameters
params = funcItem.getChild("params", False)
if params and params.hasChildren():
for param in params.children:
if param.type != "identifier":
continue
paramNode = tree.Node("param")
paramNode.set("name", param.get("value"))
node.addListChild("params", paramNode)
# Check whether the function is abstract
#bodyBlockItem = funcItem.getChild("body").getFirstChild()
#if bodyBlockItem.type == "block" and bodyBlockItem.hasChildren():
# firstStatement = bodyBlockItem.children[0]
handleAccess(node, commentAttributes)
handleDeprecated(node, commentAttributes)
isAbstract = classNode.get("isAbstract", False)
# Read all description, param and return attributes
isAbstract = handleFunctionOtherAttributes(classNode, funcItem, name, commentAttributes, node, isAbstract)
# Check for documentation errors
if node.hasChild("params"):
paramsListNode = node.getChild("params")
for paramNode in paramsListNode.children:
if not paramNode.getChild("desc", False):
addError(node, "Parameter is not documented: '%s'" % paramNode.get("name"), funcItem)
if reportMissingDesc and not node.hasChild("desc"):
addError(node, "Documentation is missing.", funcItem)
# Check whether return value documentation is correct
if checkReturn:
handleFunctionReturn(classNode, funcItem, name, commentAttributes, node, isAbstract)
return node
def handleFunctionOtherAttributes(classNode, funcItem, name, commentAttributes, node, isAbstract):
for attrib in commentAttributes:
# Add description
if attrib["category"] == "description":
node = addChildIf(node, *(handleJSDocDecsription(attrib, funcItem)))
elif attrib["category"] == "see":
node = addChildIf(node, *(handleJSDocSee(attrib)))
elif attrib["category"] in ("attach", "attachStatic"):
if not "targetClass" in attrib:
addError(node, "Missing target for attach.", funcItem)
continue
attachNode = tree.Node(attrib["category"]).set("targetClass", attrib["targetClass"])
attachNode.set("targetMethod", attrib["targetMethod"])
attachNode.set("sourceClass", classNode.get("fullName")) # these two are interesting for display at the target class
attachNode.set("sourceMethod", name)
node.addChild(attachNode)
elif attrib["category"] == "param":
if not "name" in attrib:
addError(node, "Missing name of parameter", funcItem)
continue
# Find the matching param node
paramName = attrib["name"]
paramNode = node.getListChildByAttribute("params", "name", paramName, False)
if not paramNode:
addError(node, "Contains information for non-existing parameter: '%s'." % paramName, funcItem)
continue
addTypeInfo(paramNode, attrib, funcItem)
elif attrib["category"] == "return":
returnNode = tree.Node("return")
node.addChild(returnNode)
addTypeInfo(returnNode, attrib, funcItem)
elif attrib["category"] == "throws":
if node.hasChild("throws"):
throwsNode = node.getChild("throws")
else:
throwsNode = tree.Node("throws")
if not "text" in attrib:
addError(node, "Throws documentation is missing.", funcItem)
else:
child = tree.Node("desc")
child.set("text", attrib["text"])
if "type" in attrib:
child.set("type", attrib["type"])
throwsNode.addChild(child)
node.addChild(throwsNode)
elif attrib["category"] == "abstract":
isAbstract = True
if not classNode.get("isAbstract", False):
node.set("isAbstract", True)
return isAbstract
def handleFunctionReturn(classNode, funcNode, funcName, commentAttributes, docNode, isAbstract):
hasComment = len(commentAttributes) > 0
isInterface = classNode.get("type", False) == "interface"
hasSignatureDef = False
for docItem in commentAttributes:
if docItem["category"] == "signature":
hasSignatureDef = True
#overrides = False
#if len(commentAttributes) == 0:
# superClassName = classNode.get("superClass", False)
# if superClassName:
# superClassNode = selectNode(classNode, "../class[@fullName='%s']" %superClassName)
# while superClassNode:
# superClassNode = selectNode(classNode, "../class[@fullName='%s']" %superClassName)
if hasComment and not isInterface and not hasSignatureDef and not isAbstract:
returnNodes = getReturnNodes(funcNode)
hasReturnValue = False
hasNoReturnValue = False
hasReturnNodes = len(returnNodes) > 0
for returnNode in returnNodes:
if len(returnNode.getChildren()) > 0:
hasReturnValue = True
else:
hasNoReturnValue = True
hasReturnDoc = False
hasUndefinedOrVarType = False
hasNonUndefinedOrVarType = False
if Comment.getAttrib(commentAttributes, "return"):
hasVoidType = False
if "type" in Comment.getAttrib(commentAttributes, "return"):
for typeDef in Comment.getAttrib(commentAttributes, "return")["type"]:
if typeDef["type"] == "void":
hasVoidType = True
elif typeDef["type"] == "undefined" or typeDef["type"] == "var":
hasUndefinedOrVarType = True
else:
hasNonUndefinedOrVarType = True
if not hasVoidType:
hasReturnDoc = True
isSingletonGetInstance = classNode.get("isSingleton", False) and funcName == "getInstance"
if hasReturnDoc and not hasReturnNodes and not isSingletonGetInstance:
addError(docNode, "Contains documentation for return value but no return statement found.", funcNode)
if hasReturnDoc and (not hasReturnValue and hasNoReturnValue) and not hasUndefinedOrVarType:
addError(docNode, "Contains documentation for return value but returns nothing.", funcNode)
if hasReturnDoc and hasReturnValue and hasNoReturnValue and not hasUndefinedOrVarType:
addError(docNode, "Contains documentation for return value but at least one return statement has no value.", funcNode)
if hasReturnValue and not hasReturnDoc:
addError(docNode, "Missing documentation for return value.", funcNode)
return docNode
########################################################################################
#
# COMMON STUFF
#
#######################################################################################
def handleJSDocDecsription(attrib_desc, treeItem=None):
descNode = None
err_node = None
if "text" in attrib_desc:
if "TODOC" in attrib_desc["text"]:
err_node = createError("Documentation is missing.", treeItem)
descNode = tree.Node("desc").set("text", attrib_desc["text"])
return descNode, err_node
def handleJSDocSee(attrib_see, treeItem=None):
result_node = None
err_node = None
if not 'name' in attrib_see:
err_node = createError("Missing target for see.", treeItem)
else:
result_node = tree.Node("see").set("name", attrib_see["name"])
if "text" in attrib_see:
desc_node = tree.Node("desc").set("text", attrib_see["text"])
result_node.addChild(desc_node)
return result_node, err_node
def findAttachMethods(docTree):
attachMap = {}
sections = {"attach": "members", "attachStatic" :"statics"}
for method in methodNodeIterator(docTree):
for child in method.children:
if child.type in ("attach", "attachStatic"):
target_class = child.get("targetClass")
if target_class not in attachMap:
attachMap[target_class] = {"statics": {}, "members": {}}
target_method = child.get("targetMethod")
if not target_method:
target_method = method.get("name")
cmethod = attachMap[target_class][sections[child.type]][target_method] = copy.deepcopy(method) # copy.deepcopy(method)?
# patch isStatics in target class
if sections[child.type] == "statics":
cmethod.set("isStatic", True)
else:
cmethod.set("isStatic", False)
cmethod.set("sourceClass", child.get("sourceClass"))
cmethod.set("sourceMethod", method.get("name"))
clazz = None
for node in treeutil.findNode(docTree, ["class"], [("fullName", child.get("sourceClass"))]):
clazz = node
if clazz and "group" in clazz.attributes:
cmethod.set("group", clazz.attributes["group"])
return attachMap
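# The resulting attachMap is keyed by target class and split into sections,
# roughly as follows (class and method names are hypothetical):
#
#   { "qx.Class": { "statics": { "someAttachedStatic": <method docnode> },
#                   "members": { "someAttachedMember": <method docnode> } } }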
def variableIsClassName(varItem):
length = len(varItem.children)
for i in range(length):
varChild = varItem.children[i]
if not varChild.type == "identifier":
return False
if i < length - 1:
# This is not the last identifier -> It must be a package (= lowercase)
if not varChild.get("name").islower():
return False
else:
# This is the last identifier -> It must be the class name (= first letter uppercase)
if not varChild.get("name")[0].isupper():
return False
return True
def getValue(item):
value = None
if item.type == "constant":
if item.get("constantType") == "string":
value = '"' + item.get("value") + '"'
else:
value = item.get("value")
elif item.isVar():
value, isComplete = treeutil.assembleVariable(item)
if not isComplete:
value = "[Complex expression]"
elif item.type == "operation" and item.get("operator") == "SUB":
# E.g. "-1" or "-Infinity"
value = "-" + getValue(item.getFirstChild())
if value == None:
value = "[Unsupported item type: " + item.type + "]"
return value
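# A few hypothetical examples of the string representations produced here:
#
#   constant string "blue"           -> '"blue"'
#   constant number 42               -> '42'
#   variable qx.ui.core.Widget       -> 'qx.ui.core.Widget'
#   unary minus on a constant (-1)   -> '-1'
#   anything more involved           -> '[Complex expression]' or
#                                       '[Unsupported item type: ...]'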
def addTypeInfo(node, commentAttrib=None, item=None):
if commentAttrib == None:
if node.type == "constant" and node.get("value", False):
pass
elif node.type == "param":
addError(node, "Parameter is not documented: '%s'" % commentAttrib.get("name"), item)
elif node.type == "return":
addError(node, "Return value is not documented.", item)
else:
addError(node, "Documentation is missing.", item)
return
# add description
if "text" in commentAttrib:
descNode = treeutil.findChild(node, "desc")
if descNode:
# add any additional text attributes (e.g. type description) to the
# existing desc node
descNode.set("text", descNode.get("text") + commentAttrib["text"])
else:
node.addChild(tree.Node("desc").set("text", commentAttrib["text"]))
# add types
if "type" in commentAttrib and commentAttrib["type"] and not commentAttrib["category"] == "throws":
typesNode = tree.Node("types")
node.addChild(typesNode)
for item in commentAttrib["type"]:
itemNode = tree.Node("entry")
typesNode.addChild(itemNode)
itemNode.set("type", item["type"])
if item["dimensions"] != 0:
itemNode.set("dimensions", item["dimensions"])
# add default value
if "defaultValue" in commentAttrib:
defaultValue = commentAttrib["defaultValue"]
if defaultValue != None:
# print "defaultValue: %s" % defaultValue
node.set("defaultValue", defaultValue)
# optional parameter?
if "optional" in commentAttrib and commentAttrib["optional"]:
node.set("optional", commentAttrib["optional"])
def addEventNode(classNode, classItem, commentAttrib):
node = tree.Node("event")
node.set("name", commentAttrib["name"])
if "text" in commentAttrib:
node.addChild(tree.Node("desc").set("text", commentAttrib["text"]))
# add types
if "type" in commentAttrib:
typesNode = tree.Node("types")
node.addChild(typesNode)
for item in commentAttrib["type"]:
itemNode = tree.Node("entry")
typesNode.addChild(itemNode)
itemNode.set("type", item["type"])
if item["dimensions"] != 0:
itemNode.set("dimensions", item["dimensions"])
classNode.addListChild("events", node)
def createError(msg, syntaxItem=None):
errorNode = tree.Node("error")
errorNode.set("msg", msg)
if syntaxItem:
(line, column) = treeutil.getLineAndColumnFromSyntaxItem(syntaxItem)
if line:
errorNode.set("line", line)
if column:
errorNode.set("column", column)
return errorNode
def addError(node, msg, syntaxItem=None):
errorNode = createError(msg, syntaxItem)
node.addListChild("errors", errorNode)
node.set("hasError", True)
##
# Adds a child node to <node>, handles error nodes and None as <child_node>.
# - allows both child and error node at the same time
def addChildIf(node, child_node, err_node, force=False):
if err_node != None:
node.addListChild("errors", err_node)
node.set("hasError", True)
if child_node != None:
node.addChild(child_node)
return node
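# Typical call pattern (as used throughout this module): the JSDoc handlers
# return a (result_node, error_node) pair that is unpacked directly into
# addChildIf, so either, both or neither of the two nodes may be attached:
#
#   node = addChildIf(node, *(handleJSDocSee(attrib)))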
def getPackageNode(docTree, namespace):
currPackage = docTree
childPackageName = ""
for nsPart in namespace.split("."):
childPackage = currPackage.getListChildByAttribute("packages", "name", nsPart, False)
childPackageName += nsPart
if not childPackage:
# The package does not exist -> Create it
childPackage = tree.Node("package")
childPackage.set("name", nsPart)
childPackage.set("fullName", childPackageName)
childPackage.set("packageName", (childPackageName.replace("." + nsPart, "")
if "." in childPackageName else "" )
)
currPackage.addListChild("packages", childPackage)
childPackageName += "."
# Update current package
currPackage = childPackage
return currPackage
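# A hypothetical walk-through:
#
#   pkg = getPackageNode(docTree, "qx.ui.core")
#
# creates (or reuses) the package nodes "qx", "qx.ui" and "qx.ui.core" as nested
# "packages" list children and returns the deepest one, with fullName set to
# "qx.ui.core" and packageName set to "qx.ui".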
##
# Get (or create) the node for the given class name in the docTree
#
def classNodeFromDocTree(docTree, fullClassName, commentAttributes = None):
if commentAttributes == None:
commentAttributes = {}
packageName = ""
className = fullClassName
classNode = None
package = None
if "." in fullClassName:
dotIndex = fullClassName.rindex(".")
packageName = fullClassName[:dotIndex]
className = fullClassName[dotIndex+1:]
package = getPackageNode(docTree, packageName)
classNode = package.getListChildByAttribute("classes", "name", className, False)
else:
package = docTree
classNode = package.getListChildByAttribute("classes", "name", className, False)
if not classNode:
# The class does not exist -> Create it
classNode = tree.Node("class")
classNode.set("name", className)
classNode.set("fullName", fullClassName)
classNode.set("packageName", packageName)
# Read all description, param and return attributes
for attrib in commentAttributes:
# Add description
if attrib["category"] == "description":
classNode = addChildIf(classNode, *(handleJSDocDecsription(attrib)))
elif attrib["category"] == "group":
classNode.set("group", attrib["name"])
elif attrib["category"] == "see":
classNode = addChildIf(classNode, *(handleJSDocSee(attrib)))
if package:
if fullClassName in lang.BUILTIN:
pass # don't add JS built-in classes
else:
package.addListChild("classes", classNode)
return classNode
def connectPackage(docTree, packageNode):
childHasError = False
packages = packageNode.getChild("packages", False)
if packages:
packages.children.sort(nameComparator)
for node in packages.children:
Context.console.dot()
hasError = connectPackage(docTree, node)
if hasError:
childHasError = True
classes = packageNode.getChild("classes", False)
if classes:
classes.children.sort(nameComparator)
for node in classes.children:
Context.console.dot()
hasError = connectClass(docTree, node)
if hasError:
childHasError = True
if childHasError:
packageNode.set("hasWarning", True)
return childHasError
def connectClass(docTree, classNode):
# mark property apply methods
markPropertyApply(docTree, classNode)
# Sort child classes
childClasses = classNode.get("childClasses", False)
if childClasses:
classArr = childClasses.split(",")
classArr.sort()
childClasses = ",".join(classArr)
classNode.set("childClasses", childClasses)
# Mark overridden items
postWorkItemList(docTree, classNode, "constructor", True)
postWorkItemList(docTree, classNode, "properties", True)
postWorkItemList(docTree, classNode, "events", False)
postWorkItemList(docTree, classNode, "methods", True)
postWorkItemList(docTree, classNode, "methods-static", False)
# Check whether the class is static
superClassName = classNode.get("superClass", False)
if not superClassName \
and classNode.getChild("properties", False) == None \
and classNode.getChild("methods", False) == None:
# This class is static
classNode.set("isStatic", True)
# Check for errors
childHasError = (
classNode.get("hasError", False) or
listHasError(classNode, "constructor") or
listHasError(classNode, "properties") or
listHasError(classNode, "methods") or
listHasError(classNode, "methods-static") or
listHasError(classNode, "constants") or
listHasError(classNode, "events")
)
if childHasError:
classNode.set("hasWarning", True)
return childHasError
def documentApplyMethod(methodNode, props):
if methodNode.getChild("desc", False) != None:
return
firstParam = treeutil.selectNode(methodNode, "params/param[1]/@name")
if firstParam is None:
firstParam = "value"
secondParam = treeutil.selectNode(methodNode, "params/param[2]/@name")
if secondParam is None:
secondParam = "old"
paramType = "var"
paramTypes = []
propNames = []
for prop in props:
propNames.append(prop.get("name"))
pType = prop.get("check", False)
if pType is False or pType == "Custom check function.":
pType = "var"
paramTypes.append(pType)
# if all properties have the same value for "check", use that
if paramTypes[1:] == paramTypes[:-1]:
paramType = paramTypes[0]
if len(propNames) > 1:
propNames.sort()
propList = "</code>, <code>".join(propNames[:-1]) + "</code> and <code>" + propNames[-1]
propNamesString = "properties <code>%s</code>" %propList
linkList = "}, {@link #".join(propNames[:-1]) + "} and {@link #" + propNames[-1]
propLinksString = "s: {@link #%s}" %linkList
else:
propNamesString = "property <code>%s</code>" %propNames[0]
propLinksString = ": {@link #%s}" %propNames[0]
functionCode = ("/**\n"
"* Applies changes of the property value of the %(propNames)s.\n"
"*\n"
"* For further details take a look at the property definition%(propLinks)s.\n"
"*\n"
"* @param %(firstParamName)s {%(paramType)s} new value of the property\n"
"* @param %(secondParamName)s {%(paramType)s} previous value of the property (null if it was not yet set).\n"
"*/\n"
"function(%(firstParamName)s, %(secondParamName)s) {}") % ({
"firstParamName": firstParam,
"secondParamName": secondParam,
"paramType": paramType,
"propNames": propNamesString,
"propLinks": propLinksString,
"propName": methodNode.get("name")
})
node = treeutil.compileString(functionCode)
commentAttributes = Comment.parseNode(node)[-1]
docNode = handleFunction(node, methodNode.get("name"), commentAttributes, treeutil.selectNode(methodNode, "../.."))
oldParams = methodNode.getChild("params", False)
if oldParams:
methodNode.replaceChild(oldParams, docNode.getChild("params"))
else:
methodNode.addChild(docNode.getChild("params"))
oldDesc = methodNode.getChild("desc", False)
if oldDesc:
methodNode.replaceChild(oldDesc, docNode.getChild("desc"))
else:
methodNode.addChild(docNode.getChild("desc"))
def markPropertyApply(docTree, classNode):
# Sort the list
sortByName(classNode, "methods")
# Post work all items
methods = classNode.getChild("methods", False)
if not methods:
return
dependendClasses = [cls for cls in dependendClassIterator(docTree, classNode)]
for itemNode in methods.children:
name = itemNode.get("name")
for dep in dependendClasses:
props = dep.getChild("properties", False)
if not props:
continue
applyFor = []
for prop in props.children:
if prop.get("apply", False) == name:
propNode = tree.Node("entry")
propNode.set("applies", dep.get("fullName") + "#" + prop.get("name"))
itemNode.addListChild("apply", propNode)
removeErrors(itemNode)
applyFor.append(prop)
if len(applyFor) > 0:
documentApplyMethod(itemNode, applyFor)
def dependendClassIterator(docTree, classNode):
yield classNode
directDependencies = []
superClassName = classNode.get("superClass", False)
if superClassName:
directDependencies.append(superClassName)
for list_ in ["mixins", "interfaces"]:
listItems = classNode.get(list_, False)
if listItems:
directDependencies.extend(listItems.split(","))
for list_ in ["superMixins", "superInterfaces"]:
listNode = classNode.getChild(list_, False)
if listNode:
directDependencies.extend([depNode.get("name") for depNode in listNode.children])
for dep in directDependencies:
for cls in dependendClassIterator(docTree, classNodeFromDocTree(docTree, dep)):
yield cls
def itemHasAnyDocs(node):
if node.getChild("desc", False) != None:
return True
if node.hasChildren():
for child in node.children:
if child.type == "params":
for param in child.children:
if param.getChild("desc", False) != None:
return True
elif child.type != "errors":
return True
return False
def postWorkItemList(docTree, classNode, listName, overridable):
"""Does the post work for a list of properties or methods."""
# Sort the list
sortByName(classNode, listName)
# Post work all items
listNode = classNode.getChild(listName, False)
if listNode:
for itemNode in listNode.children:
name = itemNode.get("name")
# Check whether this item is overridden and try to inherit the
# documentation from the next matching super class
if not overridable:
continue
superClassName = classNode.get("superClass", False)
overriddenFound = False
docFound = itemHasAnyDocs(itemNode)
# look for documentation in interfaces
if (not docFound):
for item in dependendClassIterator(docTree, classNode):
if item == classNode:
continue
if item.get("type", False) in ("interface", "mixin"):
interfaceItemNode = item.getListChildByAttribute(listName, "name", name, False)
if not interfaceItemNode:
continue
if item.get("type", "") == "mixin" and not interfaceItemNode.get("isCtor", False):
# item overrides a mixin item included by a super class
overriddenFound = True
itemNode.set("overriddenFrom", item.get("fullName"))
itemNode.set("docFrom", item.get("fullName"))
docFound = itemHasAnyDocs(interfaceItemNode)
# Remove previously recorded documentation errors from the item
# (Any documentation errors will be recorded in the super class)
removeErrors(itemNode)
break
# look for documentation in super classes
while superClassName and (not overriddenFound or not docFound):
superClassNode = classNodeFromDocTree(docTree, superClassName)
superItemNode = superClassNode.getListChildByAttribute(listName, "name", name, False)
if superItemNode:
if not docFound:
# This super item has a description
# -> Check whether the parameters match
# NOTE: paramsMatch works for properties, too
# (Because both compared properties always have no params)
if paramsMatch(itemNode, superItemNode):
# The parameters match -> We can use the documentation of the super class
itemNode.set("docFrom", superClassName)
docFound = itemHasAnyDocs(superItemNode)
# Remove previously recorded documentation errors from the item
# (Any documentation errors will be recorded in the super class)
removeErrors(itemNode)
else:
errorsNode = itemNode.getChild("errors", False)
if errorsNode:
if len(errorsNode.getChildren()) > 0:
errorNode = errorsNode.getChildren()[0]
msg = errorNode.get("msg") + " Signature of overriding method different from superclass method."
errorNode.set("msg", msg)
docFound = True
if not overriddenFound:
# This super class has the item defined -> Add a overridden attribute
itemNode.set("overriddenFrom", superClassName)
overriddenFound = True
# Check the next superclass
superClassName = superClassNode.get("superClass", False)
if not docFound and itemNode.get("overriddenFrom", False):
# This item is overridden, but we didn't find any documentation in the
# super classes -> Add a warning
itemNode.set("hasWarning", True)
def paramsMatch(methodNode1, methodNode2):
params1 = methodNode1.getChild("params", False)
params2 = methodNode2.getChild("params", False)
if params1 == None or params2 == None:
# One method has no parameters -> The params match if both are None
return params1 == params2
elif len(params1.children) != len(params2.children):
# The param count is different -> The params don't match
return False
else:
for i in range(len(params1.children)):
par1 = params1.children[i]
par2 = params2.children[i]
if (par1.get("name") != par2.get("name")):
# These parameters don't match
return False
# All tests passed
return True
def removeErrors(node):
errors = node.getChild("errors", False)
node.remove("hasWarning")
if errors:
node.removeChild(errors)
node.remove("hasError")
def sortByName(node, listName):
listNode = node.getChild(listName, False)
if listNode:
listNode.children.sort(nameComparator)
def nameComparator(node1, node2):
name1 = node1.get("name").lower()
name2 = node2.get("name").lower()
return cmp(name1, name2)
def listHasError(node, listName):
listNode = node.getChild(listName, False)
if listNode:
for childNode in listNode.children:
if childNode.get("hasError", False):
return True
return False
def packagesToJsonString(node, prefix = "", childPrefix = " ", newLine="\n", encoding="utf-8"):
asString = prefix + '{type:"' + tree.escapeJsonChars(node.type) + '"'
if node.type == "class":
node.set("externalRef", True)
if node.hasAttributes():
asString += ',attributes:{'
firstAttribute = True
for key in node.attributes:
if not firstAttribute:
asString += ','
asString += '"' + key + '":"' + tree.escapeJsonChars(node.attributes[key]) + '"'
firstAttribute = False
asString += '}'
if node.type == "class":
node.remove("externalRef")
if node.hasChildren() and node.type != "class":
asString += ',children:[' + newLine
prefix = prefix + childPrefix
for child in node.children:
asString += packagesToJsonString(child, prefix, childPrefix, newLine) + ',' + newLine
# NOTE We remove the ',\n' of the last child
if newLine == "":
asString = asString[:-1] + prefix + ']'
else:
asString = asString[:-2] + newLine + prefix + ']'
asString += '}'
return asString
##
# interface function
def getPackageData(node):
data = {
"type" : node.type
}
if node.type == "class":
node.set("externalRef", True)
if node.hasAttributes():
data["attributes"] = {}
for key in node.attributes:
data["attributes"][key] = node.attributes[key]
if node.type == "class":
node.remove("externalRef")
if node.hasChildren() and node.type != "class":
data["children"] = []
for child in node.children:
data["children"].append(getPackageData(child))
return data
def packagesToXmlString(node, prefix = "", childPrefix = " ", newLine="\n", encoding="utf-8"):
if node.type == "class":
node.set("externalRef", True)
hasText = False
asString = prefix + "<" + node.type
if node.hasAttributes():
for key in node.attributes:
if key == "text":
hasText = True
else:
asString += " " + key + "=\"" + tree.escapeXmlChars(node.attributes[key], True, encoding) + "\""
if node.type == "class":
node.remove("externalRef")
if not node.hasChildren() and not hasText:
asString += "/>" + newLine
else:
asString += ">"
if hasText:
asString += newLine + prefix + childPrefix
asString += "<text>" + tree.escapeXmlChars(node.attributes["text"], False, encoding) + "</text>" + newLine
if node.hasChildren():
asString += newLine
for child in node.children:
asString += packagesToXmlString(child, prefix + childPrefix, childPrefix, newLine, encoding)
asString += prefix + "</" + node.type + ">" + newLine
return asString
def classNodeIterator(docTree):
if docTree.type == "class":
yield docTree
return
if docTree.hasChildren():
for child in docTree.children:
for cls in classNodeIterator(child):
yield cls
def methodNodeIterator(docTree):
if docTree.type == "method":
yield docTree
return
if docTree.hasChildren():
for child in docTree.children:
for method in methodNodeIterator(child):
yield method
def docTreeIterator(docTree, type_):
if docTree.type == type_:
yield docTree
if docTree.children:
for child in docTree.children:
for entry in docTreeIterator(child, type_):
yield entry
def errorNodeIterator(docTree):
if docTree.get("hasError", False) or docTree.get("hasWarning", False):
yield docTree
if docTree.hasChildren():
for child in docTree.children:
for fcn in errorNodeIterator(child):
yield fcn
################################################################################
#
# API DOC VERIFICATION
#
################################################################################
# TODO: move to treeutil?
def getParentAttrib(node, attrib, type=None):
while node:
if node.hasAttributes():
if attrib in node.attributes:
if type:
if node.type == type:
return node.attributes[attrib]
else:
return node.attributes[attrib]
if node.hasParent():
node = node.parent
else:
node = None
return None
def getTopPackage(node):
while node:
if node.hasAttributes():
if "packageName" in node.attributes:
if node.attributes["packageName"] == "":
return node.get("name")
elif not "." in node.attributes["packageName"]:
return node.get("packageName")
if node.hasParent():
node = node.parent
else:
node = None
return None
def verifyLinks(docTree, index):
Context.console.info("Verifying internal doc links...", False)
linkRegExp = re.compile("\{\s*@link\s*([\w#-_\.]*)[\W\w\d\s]*?\}")
descNodes = docTree.getAllChildrenOfType("desc")
links = []
for descNode in descNodes:
if not "@link" in descNode.attributes["text"]:
continue
match = linkRegExp.findall(descNode.attributes["text"])
if not match:
continue
internalLinks = []
for link in match:
if not "<a" in link:
internalLinks.append(link)
if len(internalLinks) > 0:
nodeType = descNode.parent.type
if nodeType == "param":
itemName = getParentAttrib(descNode.parent, "name")
paramName = getParentAttrib(descNode, "name")
paramForType = descNode.parent.parent.parent.type
else:
itemName = getParentAttrib(descNode, "name")
paramName = None
paramForType = None
linkData = {
"nodeType": nodeType,
"packageName": getParentAttrib(descNode, "packageName"),
"className": getParentAttrib(descNode, "name", "class"),
"itemName": itemName,
"paramName": paramName,
"paramForType": paramForType,
"links": internalLinks,
"parent": descNode.parent
}
links.append(linkData)
count = 0
classesWithWarnings = []
for link in links:
count += 1
Context.console.progress(count, len(links))
result = checkLink(link, docTree, index)
if result:
for ref, link in result.iteritems():
addError(link["parent"], "Unknown link target: '%s'" % ref)
if not link["className"] in classesWithWarnings:
parent = link["parent"]
while parent:
if parent.type == "class":
classesWithWarnings.append(link["className"])
parent.set("hasWarning", True)
parent = None
break
if hasattr(parent, "parent"):
parent = parent.parent
def checkLink(link, docTree, index):
brokenLinks = {}
def getTargetName(ref):
targetPackageName = None
targetClassName = None
targetItemName = None
classItem = ref.split("#")
# internal class item reference
if classItem[0] == "":
targetPackageName = link["packageName"]
targetClassName = link["className"]
else:
namespace = classItem[0].split(".")
targetPackageName = ".".join(namespace[:-1])
if targetPackageName == "":
if link["nodeType"] == "package":
targetPackageName = link["packageName"] + "." + link["itemName"]
else:
targetPackageName = link["packageName"]
targetClassName = namespace[-1]
if len(classItem) == 2:
targetItemName = classItem[1]
return (targetPackageName + "." + targetClassName, targetItemName)
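# Hypothetical resolutions performed by getTargetName (class names are examples):
#
#   getTargetName("#setLabel")                   # -> ("<current package>.<current class>", "setLabel")
#   getTargetName("qx.ui.basic.Label#setValue")  # -> ("qx.ui.basic.Label", "setValue")
#   getTargetName("qx.ui.basic.Label")           # -> ("qx.ui.basic.Label", None)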
def isClassInHierarchy(docTree, className, searchFor):
targetClass = docTree.getChildByTypeAndAttribute("class", "fullName", className, False, True)
if not targetClass:
return False
while targetClass:
if targetClass.attributes["fullName"] in searchFor:
return True
if "mixins" in targetClass.attributes:
for wanted in searchFor:
if wanted in targetClass.attributes["mixins"]:
return True
if "superClass" in targetClass.attributes:
superClassName = targetClass.attributes["superClass"]
targetClass = docTree.getChildByTypeAndAttribute("class", "fullName", superClassName, False, True)
else:
targetClass = None
return False
for ref in link["links"]:
# Remove parentheses from method references
if ref[-2:] == "()":
ref = ref[:-2]
# ref is a fully qualified package or class name
if ref in index["__fullNames__"]:
continue
name = getTargetName(ref)
targetClassName = name[0]
targetItemName = name[1]
# unknown class or package
if not targetClassName in index["__fullNames__"]:
brokenLinks[ref] = link
continue
# valid package or class ref
if not targetItemName:
continue
# unknown class item
if not "#" + targetItemName in index["__index__"]:
# the index doesn't tell us if the class is static
# so we have to assume #construct is a valid target
if targetItemName != "construct":
brokenLinks[ref] = link
continue
classHasItem = False
classesWithItem = []
# get all classes that have an item with the same name as the referenced item
for occurrence in index["__index__"]["#" + targetItemName]:
className = index["__fullNames__"][occurrence[1]]
classesWithItem.append(className)
if targetClassName == className:
classHasItem = True
break
if classHasItem:
continue
# search for a superclass or included mixin with the referenced item
classHasItem = isClassInHierarchy(docTree, targetClassName, classesWithItem)
if not classHasItem:
brokenLinks[ref] = link
return brokenLinks
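# Illustrative note on how checkLink resolves references (the example names are assumptions,
# not taken from a real doc tree): for the reference "qx.ui.core.Widget#getLayoutParent",
# getTargetName() splits at "#" into the class part "qx.ui.core.Widget" and the item part
# "getLayoutParent"; a reference that starts with "#" (e.g. "#getLayoutParent") keeps the
# package and class of the surrounding linkData instead.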
def verifyTypes(docTree, index):
Context.console.info("Verifying types...", False)
knownTypes = lang.GLOBALS[:]
knownTypes = knownTypes + ["var", "null",
# additional types supported by the property system:
"Integer", "PositiveInteger", "PositiveNumber",
"Float", "Double", "Map",
"Node", "Element", "Document", "Window",
"Event", "Class", "Mixin", "Interface", "Theme",
"Color", "Decorator", "Font"
]
count = 0
docNodes = docTree.getAllChildrenOfType("return")
docNodes = docNodes + docTree.getAllChildrenOfType("param")
docNodes = docNodes + docTree.getAllChildrenOfType("childControl")
total = len(docNodes)
for docNode in docNodes:
count += 1
Context.console.progress(count, total)
for typesNode in docNode.getAllChildrenOfType("types"):
for entryNode in typesNode.getAllChildrenOfType("entry"):
unknownTypes = []
entryType = entryNode.get("type")
if (not entryType in knownTypes) and not ("value" in entryType and re.search("[\<\>\=]", entryType)):
unknownTypes.append(entryType)
if len(unknownTypes) > 0:
itemName = getParentAttrib(docNode, "name")
packageName = getParentAttrib(docNode, "packageName")
className = getParentAttrib(docNode, "name", "class")
linkData = {
"itemName": itemName,
"packageName": packageName,
"className": className,
"nodeType": docNode.parent.type,
"links": unknownTypes
}
docNodeType = ""
if docNode.type == "param":
docNodeType = "Parameter '%s'" % docNode.get("name")
elif docNode.type == "return":
docNodeType = "Return value"
elif docNode.type == "childControl":
docNodeType = "Child control '%s'" % docNode.get("name")
classesWithWarnings = []
for ref in checkLink(linkData, docTree, index):
fullName = "%s.%s#%s" % (packageName, className, itemName)
#msg = "%s of %s is documented as unknown type '%s'" % (docNodeType, fullName, ref)
msg = "%s: Unknown type '%s'" % (docNodeType, ref)
if (docNode.parent.get("name", False)):
#Add error to method/event/... node, not params node
addError(docNode.parent, msg)
else:
addError(docNode.parent.parent, msg)
if not linkData["className"] in classesWithWarnings:
parent = docNode
while parent:
if parent.type == "class":
classesWithWarnings.append(linkData["className"])
parent.set("hasWarning", True)
parent = None
break
if hasattr(parent, "parent"):
parent = parent.parent
def verifyDocPercentage(docTree):
packages = {}
for docNode in treeutil.nodeIterator(docTree, ["package", "class", "property", "event", "method"]):
pkg = getTopPackage(docNode)
if pkg == "":
import pydb
pydb.set_trace()
if not pkg in packages:
packages[pkg] = {
"documentableItems": 0,
"undocumentedItems": 0
}
packages[pkg]["documentableItems"] += 1
if docNode.get("hasError", False):
packages[pkg]["undocumentedItems"] += 1
for pkgName, pkgStats in packages.iteritems():
Context.console.info("API Documentation Statistics for package '%s':" % pkgName)
undocumentedItems = pkgStats["undocumentedItems"]
documentableItems = pkgStats["documentableItems"]
percentageWithErrors = (float(undocumentedItems) / documentableItems) * 100
percentageOk = "{0:.2f}".format(100 - percentageWithErrors)
Context.console.indent()
Context.console.info("%s API items total" % documentableItems)
Context.console.info("%s API items with missing or incomplete documentation" % undocumentedItems)
Context.console.info("%s%% API documentation completeness" % percentageOk)
Context.console.outdent()
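# Worked example of the statistics above (numbers are illustrative): a package with 200
# documentable items of which 30 carry a "hasError" flag yields a 15% error rate, which is
# reported as "85.00% API documentation completeness".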
def logErrors(docTree, targets):
for errNode in treeutil.nodeIterator(docTree, ["error"]):
if "console" in targets:
itemName = getParentAttrib(errNode, "fullName")
itemType = errNode.parent.parent.type
if itemType == 'doctree':
Context.console.warn(errNode.get("msg"))
if not itemType in ["class", "package"]:
#itemName = itemName + "#" + getParentAttrib(errNode, "name")
pass
line = errNode.get("line", False)
column = errNode.get("column", False)
lineCol = ""
if line:
lineCol = " (" + str(line)
if column:
lineCol = "%s,%s" % (lineCol, str(column))
lineCol = lineCol + ")"
Context.console.warn("%s%s: %s" % (itemName, lineCol, errNode.get("msg")))
if not "data" in targets:
for node in errorNodeIterator(docTree):
removeErrors(node)
| python | 74,555 |
import pytest
from panini.test_client import TestClient, get_logger_files_path
from panini import app as panini_app
def run_panini():
app = panini_app.App(
service_name="test_encoding",
host="127.0.0.1",
port=4222,
app_strategy="asyncio",
logger_in_separate_process=False,
logger_files_path=get_logger_files_path(),
)
@app.listen("test_encoding.foo")
async def foo(msg):
return {"len": len(msg.data["data"])}
@app.listen("test_encoding.helper.correct")
async def helper(msg):
return {"data": "data"}
@app.listen("test_encoding.helper.incorrect")
async def helper(msg):
return "message not dict"
@app.listen("test_encoding.message.incorrect")
async def bar(msg):
await app.request(
subject="test_encoding.helper.correct", message="message not dict"
)
return {"success": True}
@app.listen("test_encoding.message.correct")
async def bar(msg):
await app.request(
subject="test_encoding.helper.incorrect", message={"data": "some data"}
)
return {"success": True}
@app.listen("test_encoding.correct")
async def bar(msg):
await app.request(
subject="test_encoding.helper.correct", message={"data": "some data"}
)
return {"success": True}
app.start()
@pytest.fixture(scope="module")
def client():
client = TestClient(run_panini)
client.start()
yield client
client.stop()
def test_encoding(client):
response = client.request("test_encoding.foo", {"data": "some correct data"})
assert response["len"] == 17
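    # The next payload is deliberately non-Latin: "не латинские символы" is Russian for
    # "non-Latin characters" and is 20 characters long, which is what the assertion checks.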
response = client.request("test_encoding.foo", {"data": "не латинские символы"})
assert response["len"] == 20
def test_correct_message_format(client):
response = client.request("test_encoding.correct", {"data": "some data"})
assert response["success"] is True
def test_incorrect_message_format(client):
with pytest.raises(OSError):
client.request("test_encoding.message.correct", {"data": "some data"})
with pytest.raises(OSError):
client.request("test_encoding.message.incorrect", {"data": "some data"})
| python | 2,226 |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to the `/twist_cmd` message, which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important, or refer to the document for a list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While it is enabled all the time in the simulator, that will not be the case in the
real car. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle-specific values (like vehicle_mass,
wheel_base, etc.) should not be altered in these files.
We have also provided some reference implementations for the PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish them on the various publishers
that we have created in the `__init__` function.
'''
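# Illustrative sketch (not part of the provided reference implementation): one common way
# to avoid the PID error accumulation described above is to reset the controller whenever
# dbw is disabled. The class name `SimplePID`, its interface and its use of `sample_time`
# are assumptions for illustration only; the actual `twist_controller.Controller` may differ.
class SimplePID(object):
    def __init__(self, kp, ki, kd):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.int_val = 0.0
        self.last_error = 0.0
    def reset(self):
        # Called while a human is driving so stale integral error does not accumulate.
        self.int_val = 0.0
        self.last_error = 0.0
    def step(self, error, sample_time):
        # Standard discrete PID update.
        self.int_val += error * sample_time
        derivative = (error - self.last_error) / sample_time if sample_time > 0 else 0.0
        self.last_error = error
        return self.kp * error + self.ki * self.int_val + self.kd * derivative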
PUBLISHING_RATE = 50 # Rate of publishing
class DBWNode(object):
def __init__(self):
rospy.init_node('dbw_node')
vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
brake_deadband = rospy.get_param('~brake_deadband', .1)
decel_limit = rospy.get_param('~decel_limit', -5.0)
accel_limit = rospy.get_param('~accel_limit', 1.)
wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
wheel_base = rospy.get_param('~wheel_base', 2.8498)
steer_ratio = rospy.get_param('~steer_ratio', 14.8)
max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
SteeringCmd, queue_size=1)
self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
ThrottleCmd, queue_size=1)
self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
BrakeCmd, queue_size=1)
# TODO: Create `Controller` object
self.controller = Controller(vehicle_mass, fuel_capacity, brake_deadband, decel_limit,
accel_limit, wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle)
# TODO: Subscribe to all the topics you need to
rospy.Subscriber('/twist_cmd',TwistStamped, self.twist_cb)
rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
rospy.Subscriber('/current_velocity',TwistStamped, self.velocity_cb)
self.current_vel = None
self.curr_ang_vel = None
self.dbw_enabled = None
self.linear_vel = None
self.angular_vel = None
self.throttle = self.steering = self.brake = 0
self.loop()
def loop(self):
rate = rospy.Rate(PUBLISHING_RATE)
while not rospy.is_shutdown():
# TODO: Get predicted throttle, brake, and steering using `twist_controller`
# You should only publish the control commands if dbw is enabled
# throttle, brake, steering = self.controller.control(<proposed linear velocity>,
# <proposed angular velocity>,
# <current linear velocity>,
# <dbw status>,
# <any other argument you need>)
# if <dbw is enabled>:
# self.publish(throttle, brake, steer)
if not None in (self.current_vel, self.linear_vel, self.angular_vel):
self.throttle, self.brake, self.steering = self.controller.control(self.current_vel,
self.dbw_enabled,
self.linear_vel,
self.angular_vel)
if self.dbw_enabled:
self.publish(self.throttle, self.brake, self.steering)
rate.sleep()
    def dbw_enabled_cb(self, msg):
        # Keep only the payload: a std_msgs/Bool message object is always truthy,
        # even when its data field is False.
        self.dbw_enabled = msg.data
def twist_cb(self, msg):
self.linear_vel = msg.twist.linear.x
self.angular_vel = msg.twist.angular.z
def velocity_cb(self, msg):
self.current_vel = msg.twist.linear.x
def publish(self, throttle, brake, steer):
tcmd = ThrottleCmd()
tcmd.enable = True
tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
tcmd.pedal_cmd = throttle
self.throttle_pub.publish(tcmd)
scmd = SteeringCmd()
scmd.enable = True
scmd.steering_wheel_angle_cmd = steer
self.steer_pub.publish(scmd)
bcmd = BrakeCmd()
bcmd.enable = True
bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
bcmd.pedal_cmd = brake
self.brake_pub.publish(bcmd)
if __name__ == '__main__':
DBWNode()
| python | 5,558 |
#!/usr/bin/env python
# encoding: utf-8
'''
views.py
Created by mmiyaji on 2016-07-10.
Copyright (c) 2016 ruhenheim.org. All rights reserved.
'''
from django.shortcuts import render
from django.http import HttpResponse
import os, re, sys, commands, time, datetime, random, logging
from django.http import HttpResponse, HttpResponseRedirect
from django.template import Context, loader
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.contrib import auth
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_protect
from django.utils.encoding import force_unicode, smart_str
from django.core import serializers
from django.conf import settings
from django.http import Http404
from django.utils.http import urlencode
from django.http import Http404
from django.template.loader import get_template
from mainapp.models import *
logger = logging.getLogger(__name__)
def home(request):
"""
Case of GET REQUEST '/'
home page
"""
temp_values = {
"subscroll":True,
}
return render(request, 'general/index.html', temp_values)
def login_view(request):
    # Force logout so any existing session is cleared before showing the login page
logout(request)
username = password = ''
first_name = last_name = email = ''
error_list = []
error_target = []
next_url = "/"
if request.GET:
username = request.GET.get('username','')
first_name = request.GET.get('first_name','')
last_name = request.GET.get('last_name','')
email = request.GET.get('email','')
error_code = request.GET.get('error_code','')
elif request.POST:
if 'siginup' in request.POST:
signup_view(request)
else:
username = request.POST['username']
password = request.POST['password']
next_url = request.POST.get('next', next_url)
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect(next_url)
else:
error_list.append('login_failed')
else:
error_list.append('login_failed')
temp_values = {
"error_list": error_list,
"error_target": error_target,
"username": username,
"first_name": first_name,
"last_name": last_name,
"email": email,
}
return render(request, 'general/login.html', temp_values)
def signup_view(request):
username = password = password2 = ''
first_name = last_name = email = ''
error_list = []
error_target = []
if request.POST:
username = request.POST['username']
password = request.POST['password']
password2 = request.POST['password_confirm']
first_name = request.POST['first_name']
last_name = request.POST['last_name']
email = request.POST['email']
# is_staff = request.POST['is_staff']
if password == password2 and valid_pass(password) == 0:
if not User.objects.filter(username=username):
user = User.objects.create_user(username, email, password)
user.first_name = first_name
user.last_name = last_name
user.save()
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/')
else:
error_list.append('wrong_user')
error_list.append('signup_failed')
else:
error_list.append('wrong_password')
error_list.append('signup_failed')
error_target.append('password')
error_target.append('password2')
temp_values = {
"error_list": error_list,
"error_target": error_target,
"username": username,
"first_name": first_name,
"last_name": last_name,
"email": email,
}
# query = urlencode(temp_values)
# url = ''.join([
# reverse('dansible:login'),
# '?',
# query])
# return HttpResponseRedirect(url)
return render(request, 'general/login.html', temp_values)
else:
raise Http404
def valid_pass(password):
"""
validate password
Arguments:
- `password`:
"""
if len(password) < 6:
return 1
return 0
| python | 4,766 |
#!/usr/bin/python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
package_ios.py - Build and Package Release and Debug fat libraries for iOS.
"""
import argparse
import os
import shutil
import sys
def run(command, extra_options=''):
command = command + ' ' + ' '.join(extra_options)
print command
return os.system(command)
def build(out_dir, test_target, extra_options=''):
return run('ninja -C ' + out_dir + ' ' + test_target,
extra_options)
def lipo_libraries(out_dir, input_dirs, out_lib, input_lib):
lipo = "lipo -create "
for input_dir in input_dirs:
lipo += input_dir + "/" + input_lib + " "
lipo += '-output ' + out_dir + "/" + out_lib
return run(lipo)
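# Example of the command built above (paths are illustrative and depend on the chosen
# out_dir): lipo -create out/Release/cronet/libcronet_standalone.a
# out/Release-iphoneos/cronet/libcronet_standalone.a -output <out_dir>/Cronet/libcronet_opt.a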
def copy_build_dir(target_dir, build_dir):
try:
shutil.copytree(build_dir, target_dir, ignore=shutil.ignore_patterns('*.a'))
except OSError as e:
print('Directory not copied. Error: %s' % e)
return 0
def package_ios(out_dir, build_dir, build_config):
build_dir_sim = build_dir
build_dir_dev = build_dir +'-iphoneos'
build_target = 'cronet_package'
target_dir = out_dir + "/Cronet"
return build(build_dir_sim, build_target) or \
build(build_dir_dev, build_target) or \
copy_build_dir(target_dir, build_dir_dev + "/cronet") or \
lipo_libraries(target_dir, [build_dir_sim, build_dir_dev], \
"libcronet_" + build_config + ".a", \
"cronet/libcronet_standalone.a")
def package_ios_framework(out_dir='out/Framework', extra_options=''):
print 'Building Cronet Dynamic Framework...'
# Use Ninja to build all possible combinations.
build_dirs = ['Debug-iphonesimulator',
'Debug-iphoneos',
'Release-iphonesimulator',
'Release-iphoneos']
for build_dir in build_dirs:
print 'Building ' + build_dir
build_result = run('ninja -C out/' + build_dir + ' cronet_framework',
extra_options)
if build_result != 0:
return build_result
# Package all builds in the output directory
os.makedirs(out_dir)
for build_dir in build_dirs:
shutil.copytree(os.path.join('out', build_dir, 'Cronet.framework'),
os.path.join(out_dir, build_dir, 'Cronet.framework'))
if 'Release' in build_dir:
shutil.copytree(os.path.join('out', build_dir, 'Cronet.framework.dSYM'),
os.path.join(out_dir, build_dir, 'Cronet.framework.dSYM'))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('out_dir', nargs=1, help='path to output directory')
parser.add_argument('-g', '--skip_gyp', action='store_true',
help='skip gyp')
parser.add_argument('-d', '--debug', action='store_true',
                      help='use debug configuration')
parser.add_argument('-r', '--release', action='store_true',
help='use release configuration')
parser.add_argument('--framework', action='store_true',
help='build Cronet dynamic framework')
options, extra_options_list = parser.parse_known_args()
print options
print extra_options_list
out_dir = options.out_dir[0]
# Make sure that the output directory does not exist
if os.path.exists(out_dir):
print >>sys.stderr, 'The output directory already exists: ' + out_dir
return 1
gyp_defines = 'GYP_DEFINES="OS=ios enable_websockets=0 '+ \
'disable_file_support=1 disable_ftp_support=1 '+ \
'enable_errorprone=1 use_platform_icu_alternatives=1 ' + \
'disable_brotli_filter=1 chromium_ios_signing=0 ' + \
'target_subarch=both"'
if not options.skip_gyp:
run (gyp_defines + ' gclient runhooks')
if options.framework:
return package_ios_framework(out_dir, extra_options_list)
return package_ios(out_dir, "out/Release", "opt") or \
package_ios(out_dir, "out/Debug", "dbg")
if __name__ == '__main__':
sys.exit(main())
| python | 4,048 |
"""(Almost surely) constant random variables."""
from typing import Callable, TypeVar
import numpy as np
from probnum import config, linops
from probnum import utils as _utils
from probnum.typing import ArrayLikeGetitemArgType, ShapeArgType, ShapeType
from . import _random_variable
try:
# functools.cached_property is only available in Python >=3.8
from functools import cached_property
except ImportError:
from cached_property import cached_property
_ValueType = TypeVar("ValueType")
class Constant(_random_variable.DiscreteRandomVariable[_ValueType]):
"""Random variable representing a constant value.
Discrete random variable which (with probability one) takes a constant value. The
law / image measure of this random variable is given by the Dirac delta measure
which equals one in its (atomic) support and zero everywhere else.
This class has the useful property that arithmetic operations between a
:class:`Constant` random variable and an arbitrary :class:`RandomVariable` represent
the same arithmetic operation with a constant.
Parameters
----------
support
Constant value taken by the random variable. Also the (atomic) support of the
associated Dirac measure.
See Also
--------
RandomVariable : Class representing random variables.
Notes
-----
The Dirac measure formalizes the concept of a Dirac delta function as encountered in
physics, where it is used to model a point mass. Another way to formalize this idea
is to define the Dirac delta as a linear operator as is done in functional analysis.
While related, this is not the view taken here.
Examples
--------
>>> from probnum import randvars
>>> import numpy as np
>>> rv1 = randvars.Constant(support=0.)
>>> rv2 = randvars.Constant(support=1.)
>>> rv = rv1 + rv2
>>> rng = np.random.default_rng(seed=42)
>>> rv.sample(rng, size=5)
array([1., 1., 1., 1., 1.])
"""
def __init__(
self,
support: _ValueType,
):
if np.isscalar(support):
support = _utils.as_numpy_scalar(support)
self._support = support
support_floating = self._support.astype(
np.promote_types(self._support.dtype, np.float_)
)
if config.matrix_free:
cov = lambda: (
linops.Scaling(
0.0,
shape=(self._support.size, self._support.size),
dtype=support_floating.dtype,
)
if self._support.ndim > 0
else _utils.as_numpy_scalar(0.0, support_floating.dtype)
)
else:
cov = lambda: np.broadcast_to(
_utils.as_numpy_scalar(0.0, support_floating.dtype),
shape=(
(self._support.size, self._support.size)
if self._support.ndim > 0
else ()
),
)
var = lambda: np.broadcast_to(
_utils.as_numpy_scalar(0.0, support_floating.dtype),
shape=self._support.shape,
)
super().__init__(
shape=self._support.shape,
dtype=self._support.dtype,
parameters={"support": self._support},
sample=self._sample,
in_support=lambda x: np.all(x == self._support),
pmf=lambda x: np.float_(1.0 if np.all(x == self._support) else 0.0),
cdf=lambda x: np.float_(1.0 if np.all(x >= self._support) else 0.0),
mode=lambda: self._support,
median=lambda: support_floating,
mean=lambda: support_floating,
cov=cov,
var=var,
std=var,
)
@cached_property
def cov_cholesky(self):
# Pure utility attribute (it is zero anyway).
# Make Constant behave more like Normal with zero covariance.
return self.cov
@property
def support(self) -> _ValueType:
"""Constant value taken by the random variable."""
return self._support
def __getitem__(self, key: ArrayLikeGetitemArgType) -> "Constant":
"""(Advanced) indexing, masking and slicing.
This method supports all modes of array indexing presented in
https://numpy.org/doc/1.19/reference/arrays.indexing.html.
Parameters
----------
key : int or slice or ndarray or tuple of None, int, slice, or ndarray
Indices, slice objects and/or boolean masks specifying which entries to keep
while marginalizing over all other entries.
"""
return Constant(support=self._support[key])
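    # Example (illustrative): Constant(np.arange(6).reshape(3, 2))[0:2, 1] returns a new
    # Constant whose support is the sliced array, i.e. array([1, 3]).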
def reshape(self, newshape: ShapeType) -> "Constant":
return Constant(
support=self._support.reshape(newshape),
)
def transpose(self, *axes: int) -> "Constant":
return Constant(
support=self._support.transpose(*axes),
)
def _sample(self, rng: np.random.Generator, size: ShapeArgType = ()) -> _ValueType:
size = _utils.as_shape(size)
if size == ():
return self._support.copy()
else:
return np.tile(self._support, reps=size + (1,) * self.ndim)
# Unary arithmetic operations
def __neg__(self) -> "Constant":
return Constant(
support=-self.support,
)
def __pos__(self) -> "Constant":
return Constant(
support=+self.support,
)
def __abs__(self) -> "Constant":
return Constant(
support=abs(self.support),
)
# Binary arithmetic operations
@staticmethod
def _binary_operator_factory(
operator: Callable[[_ValueType, _ValueType], _ValueType]
) -> Callable[["Constant", "Constant"], "Constant"]:
def _constant_rv_binary_operator(
constant_rv1: Constant, constant_rv2: Constant
) -> Constant:
return Constant(
support=operator(constant_rv1.support, constant_rv2.support),
)
return _constant_rv_binary_operator
| python | 6,126 |
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If a stale block more than a month old or its header are requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv
from test_framework.mininode import (
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
)
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
class P2PFingerprintTest(DigiByteTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# Build a chain of blocks on top of given one
def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
blocks = []
for _ in range(nblocks):
coinbase = create_coinbase(prev_height + 1)
block_time = prev_median_time + 1
block = create_block(int(prev_hash, 16), coinbase, block_time)
block.solve()
blocks.append(block)
prev_hash = block.hash
prev_height += 1
prev_median_time = block_time
return blocks
# Send a getdata request for a given block hash
def send_block_request(self, block_hash, node):
msg = msg_getdata()
msg.inv.append(CInv(2, block_hash)) # 2 == "Block"
node.send_message(msg)
# Send a getheaders request for a given single block hash
def send_header_request(self, block_hash, node):
msg = msg_getheaders()
msg.hashstop = block_hash
node.send_message(msg)
# Check whether last block received from node has a given hash
def last_block_equals(self, expected_hash, node):
block_msg = node.last_message.get("block")
return block_msg and block_msg.block.rehash() == expected_hash
# Check whether last block header received from node has a given hash
def last_header_equals(self, expected_hash, node):
headers_msg = node.last_message.get("headers")
return (headers_msg and
headers_msg.headers and
headers_msg.headers[0].rehash() == expected_hash)
# Checks that stale blocks timestamped more than a month ago are not served
# by the node while recent stale blocks and old active chain blocks are.
# This does not currently test that stale blocks timestamped within the
# last month but that have over a month's worth of work are also withheld.
def run_test(self):
node0 = self.nodes[0].add_p2p_connection(P2PInterface())
# Set node time to 60 days ago
self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
# Generating a chain of 10 blocks
block_hashes = self.nodes[0].generate(nblocks=10)
# Create longer chain starting 2 blocks before current tip
height = len(block_hashes) - 2
block_hash = block_hashes[height - 1]
block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
new_blocks = self.build_chain(5, block_hash, height, block_time)
# Force reorg to a longer chain
node0.send_message(msg_headers(new_blocks))
node0.wait_for_getdata()
for block in new_blocks:
node0.send_and_ping(msg_block(block))
# Check that reorg succeeded
assert_equal(self.nodes[0].getblockcount(), 13)
stale_hash = int(block_hashes[-1], 16)
# Check that getdata request for stale block succeeds
self.send_block_request(stale_hash, node0)
test_function = lambda: self.last_block_equals(stale_hash, node0)
wait_until(test_function, timeout=3)
# Check that getheader request for stale block header succeeds
self.send_header_request(stale_hash, node0)
test_function = lambda: self.last_header_equals(stale_hash, node0)
wait_until(test_function, timeout=3)
# Longest chain is extended so stale is much older than chain tip
self.nodes[0].setmocktime(0)
tip = self.nodes[0].generate(nblocks=1)[0]
assert_equal(self.nodes[0].getblockcount(), 14)
# Send getdata & getheaders to refresh last received getheader message
block_hash = int(tip, 16)
self.send_block_request(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
# Request for very old stale block should now fail
self.send_block_request(stale_hash, node0)
time.sleep(3)
assert not self.last_block_equals(stale_hash, node0)
# Request for very old stale block header should now fail
self.send_header_request(stale_hash, node0)
time.sleep(3)
assert not self.last_header_equals(stale_hash, node0)
# Verify we can fetch very old blocks and headers on the active chain
block_hash = int(block_hashes[2], 16)
self.send_block_request(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
self.send_block_request(block_hash, node0)
test_function = lambda: self.last_block_equals(block_hash, node0)
wait_until(test_function, timeout=3)
self.send_header_request(block_hash, node0)
test_function = lambda: self.last_header_equals(block_hash, node0)
wait_until(test_function, timeout=3)
if __name__ == '__main__':
P2PFingerprintTest().main()
| python | 5,905 |
#!/usr/bin/python3
import time
import threading
import ctypes
WORK_DURATION = 600
LONG_BREAK_EVERY = 4
TITLE = 'pybreak'
MAIN_MENU_TEXT = 'Press OK to end session.'
SHORT_BREAK_TEXT = 'Go take a short break! Press OK when finished.'
LONG_BREAK_TEXT = 'Go take a long break! Press OK when finished.'
MAIN_MENU_ICON = 0x00000
BREAK_ICON = 0x40000
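# In the Win32 MessageBoxW API, 0x00000 is a plain MB_OK box and 0x40000 is the
# MB_TOPMOST flag, so break popups stay on top of other windows.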
MessageBox = ctypes.windll.user32.MessageBoxW
def main():
thread = threading.Thread(target=loop, daemon=True)
thread.start()
menu_popup()
def loop():
    # Count completed work sessions so that every LONG_BREAK_EVERY-th break is a long one.
    sessions = 0
    while True:
        time.sleep(WORK_DURATION)
        sessions += 1
        if sessions % LONG_BREAK_EVERY == 0:
            break_popup(LONG_BREAK_TEXT)
        else:
            break_popup(SHORT_BREAK_TEXT)
def menu_popup():
MessageBox(None, MAIN_MENU_TEXT, TITLE, MAIN_MENU_ICON)
def break_popup(text):
MessageBox(None, text, TITLE, BREAK_ICON)
if __name__=='__main__':
main()
| python | 897 |
from django.urls import path  # This import allows us to use the path function (within urlpatterns)
from . import views  # This will import our views.py module from the main project directory
"""
Please note that it must be 'urlpatterns' and not 'urlpattern'. The name is significant and Django will raise an error otherwise.
path() allows us to create a URL for a specified view.
Note that more documentation on views can be found in the main project directory 'tutorial_1'.
"""
urlpatterns = [
path('register', views.register, name='register'),
path('login', views.login, name='login'),
path('logout', views.logout, name="logout")
]
 | python | 637
import pytest
from csv_diff import load_csv, compare
from castoredc_api.auth import auth_data
from castoredc_api.study.castor_study import CastorStudy
class TestCSVOutputArchived:
"""Tests whether the correct data is outputted.
When also extracting archived data"""
@pytest.fixture(scope="session")
def output_data_archived(self):
study = CastorStudy(
auth_data.client_id,
auth_data.client_secret,
auth_data.test_study_study_id,
"data.castoredc.com",
)
output_data_archived = study.export_to_csv(archived=True)
return output_data_archived
def test_study_export_archived(self, output_data_archived):
"""Tests if study export is correct."""
diff = compare(
load_csv(
open(output_data_archived["Study"]),
key="record_id",
),
load_csv(
open(
"tests/test_output/data_files_for_output_tests/CastorStudy - Archived.csv"
),
key="record_id",
),
)
assert diff["added"] == []
assert diff["removed"] == []
assert diff["columns_added"] == []
assert diff["columns_removed"] == []
assert diff["changed"] == [
{
"key": "110001",
"changes": {
"base_weight": ["88.0", "88"],
"base_sbp": ["120.0", "120"],
"base_dbp": ["65.0", "65"],
"base_hr": ["66.0", "66"],
"fac_V_leiden_number": ["55.0", "55"],
"base_tromboc": ["252.0", "252"],
"base_creat": ["88.0", "88"],
"fu_weight": ["66.0", "66"],
"fu_sbp": ["132.0", "132"],
"fu_dbp": ["72.0", "72"],
"fu_hr": ["69.0", "69"],
"fu_tromboc": ["366.0", "366"],
"fu_creat": ["99.0", "99"],
},
}
]
def test_qol_survey_export_without_missing_surveys_archived(
self, output_data_archived
):
"""Tests if survey export is correct.
Does not check for empty surveys"""
diff = compare(
load_csv(
open(output_data_archived["Surveys"]["QOL Survey"]),
key="survey_instance_id",
),
load_csv(
open(
"tests/test_output/data_files_for_output_tests/CastorQOLSurvey - Archived.csv"
),
key="survey_instance_id",
),
)
assert diff["removed"] == []
assert diff["columns_added"] == []
assert diff["columns_removed"] == []
assert diff["changed"] == [
{
"key": "4FF130AD-274C-4C8F-A4A0-A7816A5A88E9",
"changes": {"VAS": ["85.0", "85"]},
}
]
def test_qol_survey_export_archived(self, output_data_archived):
"""Tests if survey export is correct.
Does test for missing surveys."""
diff = compare(
load_csv(
open(output_data_archived["Surveys"]["QOL Survey"]),
key="survey_instance_id",
),
load_csv(
open(
"tests/test_output/data_files_for_output_tests/CastorQOLSurvey - Archived.csv"
),
key="survey_instance_id",
),
)
assert diff["removed"] == []
assert diff["columns_added"] == []
assert diff["columns_removed"] == []
assert diff["changed"] == [
{
"key": "4FF130AD-274C-4C8F-A4A0-A7816A5A88E9",
"changes": {"VAS": ["85.0", "85"]},
}
]
assert diff["added"] == []
def test_medication_report_export_archived(self, output_data_archived):
"""Tests if report export is correct."""
diff = compare(
load_csv(
open(output_data_archived["Reports"]["Medication"]),
key="custom_name",
),
load_csv(
open(
"tests/test_output/data_files_for_output_tests/CastorMedication - Archived.csv"
),
key="custom_name",
),
)
assert diff["removed"] == []
assert diff["columns_added"] == []
assert diff["columns_removed"] == []
assert diff["changed"] == []
assert diff["added"] == []
def test_unscheduled_visit_report_export_archived(self, output_data_archived):
"""Tests if report export is correct."""
diff = compare(
load_csv(
open(output_data_archived["Reports"]["Unscheduled visit"]),
key="custom_name",
),
load_csv(
open(
"tests/test_output/data_files_for_output_tests/CastorUnscheduledVisit - Archived.csv"
),
key="custom_name",
),
)
assert diff["removed"] == []
assert diff["columns_added"] == []
assert diff["columns_removed"] == []
assert diff["changed"] == []
assert diff["added"] == []
def test_comorbidities_report_export_archived(self, output_data_archived):
"""Tests if report export is correct."""
diff = compare(
load_csv(
open(output_data_archived["Reports"]["Comorbidities"]),
key="custom_name",
),
load_csv(
open(
"tests/test_output/data_files_for_output_tests/CastorComorbidities - Archived.csv"
),
key="custom_name",
),
)
assert diff["removed"] == []
assert diff["columns_added"] == []
assert diff["columns_removed"] == []
assert diff["changed"] == []
assert diff["added"] == []
def test_adverse_event_report_export_archived(self, output_data_archived):
"""Tests if report export is correct."""
diff = compare(
load_csv(
open(output_data_archived["Reports"]["Adverse event"]),
key="custom_name",
),
load_csv(
open(
"tests/test_output/data_files_for_output_tests/CastorAdverseEvent - Archived.csv"
),
key="custom_name",
),
)
assert diff["removed"] == []
assert diff["columns_added"] == []
assert diff["columns_removed"] == []
assert diff["changed"] == []
assert diff["added"] == []
| python | 6,779 |
"""
# Copyright Xiang Wang, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
Author: Xiang Wang, [email protected]
Status: Active
"""
import json
import copy
import codecs
import pandas as pd
from collections import defaultdict
from torch.utils.data import Dataset
from pandas.core.frame import DataFrame
class BaseDataset(Dataset):
"""
    Base dataset class
    Args:
        data (:obj:`DataFrame` or :obj:`string`): the data itself or a path to it
        categories (:obj:`list`, optional, defaults to `None`): the label categories
        is_retain_df (:obj:`bool`, optional, defaults to False): whether to copy the raw DataFrame-format data into the attribute retain_df
        is_retain_dataset (:obj:`bool`, optional, defaults to False): whether to copy the processed dataset-format data into the attribute retain_dataset
        is_train (:obj:`bool`, optional, defaults to True): whether this dataset is training data
        is_test (:obj:`bool`, optional, defaults to False): whether this dataset is test data
""" # noqa: ignore flake8"
def __init__(
self,
data,
categories=None,
is_retain_df=False,
is_retain_dataset=False,
is_train=True,
is_test=False
):
self.is_test = is_test
self.is_train = is_train
self.is_retain_df = is_retain_df
self.is_retain_dataset = is_retain_dataset
if self.is_test is True:
self.is_train = False
if isinstance(data, DataFrame):
if 'label' in data.columns:
data['label'] = data['label'].apply(lambda x: str(x))
if self.is_retain_df:
self.df = data
self.dataset = self._convert_to_dataset(data)
else:
self.dataset = self._load_dataset(data)
if categories is None:
self.categories = self._get_categories()
else:
self.categories = categories
if self.categories is not None:
self.cat2id = dict(zip(self.categories, range(len(self.categories))))
self.id2cat = dict(zip(range(len(self.categories)), self.categories))
self.class_num = len(self.cat2id)
def _get_categories(self):
return None
def _load_dataset(self, data_path):
"""
        Load the dataset
        Args:
            data_path (:obj:`string`): path to the data
""" # noqa: ignore flake8"
data_df = self._read_data(data_path)
if self.is_retain_df:
self.df = data_df
return self._convert_to_dataset(data_df)
def _convert_to_dataset(self, data_df):
pass
def _read_data(
self,
data_path,
data_format=None,
skiprows=-1
):
"""
        Read the required data
        Args:
            data_path (:obj:`string`): path to the data
            data_format (:obj:`string`, defaults to `None`): storage format of the data
            skiprows (:obj:`int`, defaults to -1): row index to skip while reading; defaults to skipping none
""" # noqa: ignore flake8"
        if data_format is None:
            # Infer the format from the file suffix when it is not given explicitly.
            data_format = data_path.split('.')[-1]
if data_format == 'csv':
data_df = pd.read_csv(data_path, dtype={'label': str})
elif data_format == 'json':
try:
data_df = pd.read_json(data_path, dtype={'label': str})
except:
data_df = self.read_line_json(data_path)
elif data_format == 'tsv':
data_df = pd.read_csv(data_path, sep='\t', dtype={'label': str})
elif data_format == 'txt':
data_df = pd.read_csv(data_path, sep='\t', dtype={'label': str})
else:
raise ValueError("The data format does not exist")
return data_df
def read_line_json(
self,
data_path,
skiprows=-1
):
"""
        Read the required data
        Args:
            data_path (:obj:`string`): path where the data is located
            skiprows (:obj:`int`, defaults to -1): row index to skip while reading; defaults to skipping none
"""
datasets = []
with codecs.open(data_path, mode='r', encoding='utf8') as f:
reader = f.readlines()
for index, line in enumerate(reader):
if index == skiprows:
continue
line = json.loads(line)
tokens = line['text']
label = line['label']
datasets.append({'text': tokens.strip(), 'label': label})
return pd.DataFrame(datasets)
def convert_to_ids(self, tokenizer):
"""
        Convert the text into id form
        Args:
            tokenizer: the tokenizer (encoder) to use
"""
if tokenizer.tokenizer_type == 'vanilla':
features = self._convert_to_vanilla_ids(tokenizer)
elif tokenizer.tokenizer_type == 'transfomer':
features = self._convert_to_transfomer_ids(tokenizer)
elif tokenizer.tokenizer_type == 'customized':
features = self._convert_to_customized_ids(tokenizer)
else:
raise ValueError("The tokenizer type does not exist")
if self.is_retain_dataset:
self.retain_dataset = copy.deepcopy(self.dataset)
self.dataset = features
def _convert_to_transfomer_ids(self, bert_tokenizer):
pass
def _convert_to_vanilla_ids(self, vanilla_tokenizer):
pass
def _convert_to_customized_ids(self, customized_tokenizer):
pass
def _get_input_length(self, text, bert_tokenizer):
pass
@property
def dataset_cols(self):
return list(self.dataset[0].keys())
@property
def to_device_cols(self):
return list(self.dataset[0].keys())
@property
def sample_num(self):
return len(self.dataset)
@property
def dataset_analysis(self):
_result = defaultdict(list)
for _row in self.dataset:
for _col in self.dataset_cols:
if type(_row[_col]) == str:
_result[_col].append(len(_row[_col]))
_report = pd.DataFrame(_result).describe()
return _report
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
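# Illustrative usage sketch (not part of the library): a minimal subclass only needs to
# turn each DataFrame row into a dict of fields; the class and column names below are
# assumptions for illustration.
#
#     class SentenceClassificationDataset(BaseDataset):
#         def _convert_to_dataset(self, data_df):
#             return [{'text': row['text'], 'label': row['label']}
#                     for _, row in data_df.iterrows()]
#
#     dataset = SentenceClassificationDataset('train.csv')
#     dataset.convert_to_ids(tokenizer)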
| python | 6,167 |
import json
import time
from flask import Flask, request, abort
from flask_socketio import SocketIO, emit
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
socketio = SocketIO(app)
api_header_name = 'API-KEY'
debug_mode = True
api_key = 'test'
def check_api_key():
request_api_key = request.headers.get(api_header_name)
if not api_key == request_api_key:
abort(401)
"""
API Routes
"""
@app.route('/')
def index():
return 'Index Page'
@app.route('/data', methods=['POST'])
def data():
data_dict = request.get_json()
print(data_dict)
socketio.emit('overlayPositionUpdate', data_dict)
return 'OK'
"""
Websocket Routes
"""
@socketio.on('latency', namespace='/')
def latency_check(data):
print(data)
current_time = int(round(time.time() * 1000))
emit('latencyResponse', {'timestamp': current_time, 'timestamp_client': data['timestamp']})
@socketio.on('positionUpdate', namespace='/')
def position_update(data):
print('X: {}, Y: {}'.format(data['x'], data['y']))
emit('overlayPositionUpdate', data)
if __name__ == '__main__':
socketio.run(app, debug=debug_mode)
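# Illustrative client sketch (not part of the original app): POSTing JSON to /data makes
# the server re-emit it to connected Socket.IO clients as an 'overlayPositionUpdate'
# event. The URL and payload below are assumptions.
#
#     import requests
#     requests.post('http://127.0.0.1:5000/data', json={'x': 10, 'y': 20})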
| python | 1,143 |
for num1 in range(1,11):
print("Tabla de multiplicar del " + str(num1))
print("-----------")
for num2 in range(1,11):
print(str(num1) + " por " + str(num2) + " es " + str(num1*num2)) | python | 202 |
########################################################################################
# Davi Frossard, 2016 #
# VGG16 implementation in TensorFlow #
# Details: #
# http://www.cs.toronto.edu/~frossard/post/vgg16/ #
# #
# Model from https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md #
# Weights from Caffe converted using https://github.com/ethereon/caffe-tensorflow #
########################################################################################
from numpy import *
import os
#from pylab import *
import numpy as np
from scipy.misc import imread, imresize
from caffe_classes import class_names
import numpy as np
#import matplotlib.pyplot as plt
#import matplotlib.cbook as cbook
import time
from scipy.misc import imread
from scipy.misc import imresize
import matplotlib.image as mpimg
from scipy.ndimage import filters
import urllib
from numpy import random
import scipy
import TensorFI as ti
import datetime
import tensorflow as tf
class vgg16:
def __init__(self, imgs, weights=None, sess=None):
self.imgs = imgs
self.convlayers()
self.fc_layers()
self.probs = tf.nn.softmax(self.fc3l)
if weights is not None and sess is not None:
self.load_weights(weights, sess)
def convlayers(self):
self.parameters = []
# zero-mean input
with tf.name_scope('preprocess') as scope:
mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
images = self.imgs-mean
# conv1_1
with tf.name_scope('conv1_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv1_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv1_2
with tf.name_scope('conv1_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv1_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv1_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool1
self.pool1 = tf.nn.max_pool(self.conv1_2,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool1')
# conv2_1
with tf.name_scope('conv2_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.pool1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv2_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv2_2
with tf.name_scope('conv2_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv2_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv2_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool2
self.pool2 = tf.nn.max_pool(self.conv2_2,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool2')
# conv3_1
with tf.name_scope('conv3_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv3_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv3_2
with tf.name_scope('conv3_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv3_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv3_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv3_3
with tf.name_scope('conv3_3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv3_2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv3_3 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool3
self.pool3 = tf.nn.max_pool(self.conv3_3,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool3')
# conv4_1
with tf.name_scope('conv4_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.pool3, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv4_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv4_2
with tf.name_scope('conv4_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv4_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv4_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv4_3
with tf.name_scope('conv4_3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv4_2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv4_3 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool4
self.pool4 = tf.nn.max_pool(self.conv4_3,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool4')
# conv5_1
with tf.name_scope('conv5_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.pool4, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv5_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv5_2
with tf.name_scope('conv5_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv5_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv5_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv5_3
with tf.name_scope('conv5_3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv5_2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv5_3 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool5
self.pool5 = tf.nn.max_pool(self.conv5_3,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
                               name='pool5')
def fc_layers(self):
# fc1
with tf.name_scope('fc1') as scope:
shape = int(np.prod(self.pool5.get_shape()[1:]))
fc1w = tf.Variable(tf.truncated_normal([shape, 4096],
dtype=tf.float32,
stddev=1e-1), name='weights')
fc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
trainable=True, name='biases')
pool5_flat = tf.reshape(self.pool5, [-1, shape])
fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)
self.fc1 = tf.nn.relu(fc1l)
self.parameters += [fc1w, fc1b]
# fc2
with tf.name_scope('fc2') as scope:
fc2w = tf.Variable(tf.truncated_normal([4096, 4096],
dtype=tf.float32,
stddev=1e-1), name='weights')
fc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
trainable=True, name='biases')
fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
self.fc2 = tf.nn.relu(fc2l)
self.parameters += [fc2w, fc2b]
# fc3
with tf.name_scope('fc3') as scope:
fc3w = tf.Variable(tf.truncated_normal([4096, 1000],
dtype=tf.float32,
stddev=1e-1), name='weights')
fc3b = tf.Variable(tf.constant(1.0, shape=[1000], dtype=tf.float32),
trainable=True, name='biases')
self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
self.parameters += [fc3w, fc3b]
def load_weights(self, weight_file, sess):
weights = np.load(weight_file)
keys = sorted(weights.keys())
for i, k in enumerate(keys):
print i, k, np.shape(weights[k])
sess.run(self.parameters[i].assign(weights[k]))
#if __name__ == '__main__':
sess = tf.Session()
imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
# log the pre-trained weights
vgg = vgg16(imgs, 'vgg16_weights.npz', sess)
# Change Me: this is the label of your test image
label = 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi'
fi = ti.TensorFI(sess, logLevel = 50, name = "convolutional", disableInjections=False)
# inputs to be injected
index = [0,2,3,5,6,8,9,12,15,17]
# save FI results into file, "eachRes" saves each FI result, "resFile" saves SDC rate
resFile = open("vgg16-binFI.csv", "a")
eachRes = open("vgg16-binEach.csv", "a")
for i in index:
# Change me: load the images that you want to inject
img1 = imread("path_to_input_image")
img1 = scipy.misc.imresize(img1, [224,224,3])
totalFI = 0.
    # initialize for binary FI
ti.faultTypes.initBinaryInjection()
while(ti.faultTypes.isKeepDoingFI):
prob = sess.run(vgg.probs, feed_dict={vgg.imgs: [img1]})[0]
preds = (np.argsort(prob)[::-1])[0:5]
# you need to feedback the FI result to guide the next FI for binary search
if(class_names[preds[0]] == label):
# Fi does not result in SDC
ti.faultTypes.sdcFromLastFI = False
else:
ti.faultTypes.sdcFromLastFI = True
        # once FI on the current data item is done, you might want to log the SDC bounds for bit 0 and bit 1
        # (optional); if you just want to measure the SDC rate, you can access the variable "ti.faultTypes.sdcRate"
if(ti.faultTypes.isDoneForCurData):
eachRes.write(`ti.faultTypes.sdc_bound_0` + "," + `ti.faultTypes.sdc_bound_1` + ",")
# Important: initialize the binary FI for next data item.
ti.faultTypes.initBinaryInjection(isFirstTime=False)
print(i, ti.faultTypes.fiTime)
eachRes.write("\n")
resFile.write(`ti.faultTypes.sdcRate` + "," + `ti.faultTypes.fiTime` + "\n")
print(ti.faultTypes.sdcRate , "fi time: ", ti.faultTypes.fiTime)
| python | 15,392 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-20 18:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('billing', '0001_initial'),
('addresses', '0001_initial'),
('carts', '0001_initial'),
('products', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_id', models.CharField(blank=True, max_length=120)),
('status', models.CharField(choices=[('created', 'Procesando'), ('paid', 'Pagado'), ('shipped', 'Enviado'), ('refunded', 'Reintegrado'), ('delivered', 'Entregado')], default='created', max_length=120)),
('payment_method', models.CharField(blank=True, choices=[('efectivo', 'Efectivo'), ('tarjeta', 'Tarjeta')], max_length=120, null=True)),
('cash_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=100, null=True)),
('cash_change', models.DecimalField(blank=True, decimal_places=2, max_digits=100, null=True)),
('shipping_total', models.DecimalField(decimal_places=2, default=500, max_digits=100)),
('total', models.DecimalField(decimal_places=2, default=0.0, max_digits=100)),
('active', models.BooleanField(default=True)),
('updated', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('billing_profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='billing.BillingProfile')),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='carts.Cart')),
('shipping_address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shipping_address', to='addresses.Address')),
],
options={
'ordering': ['-timestamp', '-updated'],
},
),
migrations.CreateModel(
name='ProductPurchase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_id', models.CharField(max_length=120)),
('refunded', models.BooleanField(default=False)),
('timestamp', models.DateTimeField(auto_now=True)),
('billing_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='billing.BillingProfile')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Variation')),
],
),
]
| python | 2,951 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `code_analysis` package."""
from click.testing import CliRunner
from code_analysis import java_dependencies as jvd
def test_java_dependencies():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(jvd.main, ['resources/java_depend.txt'])
assert result.exit_code == 0
assert 'MERGE (n:Package' in result.output
assert 'MERGE (m:Package' in result.output
assert 'MERGE (n)-[r:depends_on]->(m)' in result.output
assert 'com.company.abc.plaza.storage.ifc' in result.output
assert 'com.company.abc.general.basic.ifc.configuration' in result.output
def test_help():
runner = CliRunner()
help_result = runner.invoke(jvd.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
| python | 861 |
import scipy.stats
import pickle
import matplotlib.pyplot as plt
import sys
# Load data
with open("image_data.bin", "rb") as f:
data = pickle.load(f)
# Print results
print("The result of the two sample t-test for ASD vs TD: {}".format(scipy.stats.ttest_ind(data["asd_var"], data["td_var"])))
print("-" * 20)
print("The result of the SRCC for ASD on mean image brightness: {}".format(scipy.stats.spearmanr(data["asd_var"], data["im_brightness"])))
print("The result of the SRCC for TD on mean image brightness: {}".format(scipy.stats.spearmanr(data["td_var"], data["im_brightness"])))
print("-" * 20)
print("The result of the SRCC for ASD on variance of image brightness: {}".format(scipy.stats.spearmanr(data["asd_var"], data["im_var"])))
print("The result of the SRCC for TD on variance of image brightness: {}".format(scipy.stats.spearmanr(data["td_var"], data["im_var"])))
# Show graphs
# Choose a graph
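# Usage sketch (the script name is an assumption): e.g. `python analysis.py combined`
# overlays the ASD and TD variance histograms, while running with no argument just
# prints the statistics and "No graph." before showing an empty figure.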
plt.xlabel("Weighted Variance of Fixmap")
if len(sys.argv) < 2:
print("No graph.")
elif sys.argv[1] == "tdb":
plt.title("TD")
plt.ylabel("Mean Brightness of Image")
plt.plot(data["td_var"], data["im_brightness"], "o")
elif sys.argv[1] == "tdv":
plt.title("TD")
plt.ylabel("Variance of Brightness of Image")
plt.plot(data["td_var"], data["im_var"], "o")
elif sys.argv[1] == "asdb":
plt.title("ASD")
plt.ylabel("Mean Brightness of Image")
plt.plot(data["asd_var"], data["im_brightness"], "o")
elif sys.argv[1] == "asdv":
plt.title("ASD")
plt.ylabel("Variance of Brightness of Image")
plt.plot(data["asd_var"], data["im_var"], "o")
elif sys.argv[1] == "td":
plt.title("TD")
plt.hist(data["td_var"], 50)
elif sys.argv[1] == "asd":
plt.title("ASD")
plt.hist(data["asd_var"], 50)
elif sys.argv[1] == "combined":
plt.title("ASD and TD")
_, _, asd_hist = plt.hist(data["asd_var"], 50, alpha=0.5)
_, _, td_hist = plt.hist(data["td_var"], 50, alpha=0.5)
plt.legend(handles=[asd_hist, td_hist], labels=["ASD", "TD"])
elif sys.argv[1] == "b":
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
ax1.set_title("TD")
ax1.set_ylabel("Mean Brightness of Image")
ax1.set_xlabel("Weighted Variance of Fixmap")
ax1.plot(data["td_var"], data["im_brightness"], "o")
ax2.set_title("ASD")
ax2.set_ylabel("Mean Brightness of Image")
ax2.set_xlabel("Weighted Variance of Fixmap")
ax2.plot(data["asd_var"], data["im_brightness"], "o")
elif sys.argv[1] == "v":
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
ax1.set_title("TD")
ax1.set_ylabel("Variance of Brightness of Image")
ax1.set_xlabel("Weighted Variance of Fixmap")
ax1.plot(data["td_var"], data["im_var"], "o")
ax2.set_title("ASD")
ax2.set_ylabel("Variance of Brightness of Image")
ax2.set_xlabel("Weighted Variance of Fixmap")
ax2.plot(data["asd_var"], data["im_var"], "o")
plt.show()
| python | 2,780 |
'''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/pansharpen.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/pansharpen.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/pansharpen.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/pansharpen.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time.
'''
# %%
# %%capture
# !pip install earthengine-api
# !pip install geehydro
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
'''
# %%
# ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
# Load a Landsat 8 top-of-atmosphere reflectance image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
Map.addLayer(
image,
{'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.25, 'gamma': [1.1, 1.1, 1]},
'rgb')
# Convert the RGB bands to the HSV color space.
hsv = image.select(['B4', 'B3', 'B2']).rgbToHsv()
# Swap in the panchromatic band and convert back to RGB.
sharpened = ee.Image.cat([
hsv.select('hue'), hsv.select('saturation'), image.select('B8')
]).hsvToRgb()
# Display the pan-sharpened result.
Map.setCenter(-122.44829, 37.76664, 13)
Map.addLayer(sharpened,
{'min': 0, 'max': 0.25, 'gamma': [1.3, 1.3, 1.3]},
'pan-sharpened')
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map | python | 3,462 |
"""
So this is where all the SQL commands live
"""
CREATE_SQL = """
CREATE TABLE component_type (
id INT PRIMARY KEY AUTO_INCREMENT,
type VARCHAR(255) UNIQUE
);
CREATE TABLE components (
id INT PRIMARY KEY AUTO_INCREMENT,
serial_number VARCHAR(255),
sku TEXT,
type INT,
status INT,
FOREIGN KEY (type) REFERENCES component_type(id)
);
CREATE TABLE projects (
id INT PRIMARY KEY AUTO_INCREMENT,
product_number INT,
motherboard INT,
power_supply INT,
cpu INT,
hard_drive INT,
proj_case INT,
memory INT,
FOREIGN KEY (motherboard) REFERENCES components(id) ON DELETE CASCADE,
FOREIGN KEY (cpu) REFERENCES components(id) ON DELETE CASCADE,
FOREIGN KEY (power_supply) REFERENCES components(id) ON DELETE CASCADE,
FOREIGN KEY (hard_drive) REFERENCES components(id) ON DELETE CASCADE,
FOREIGN KEY (proj_case) REFERENCES components(id) ON DELETE CASCADE,
FOREIGN KEY (memory) REFERENCES components(id) ON DELETE CASCADE
);
"""
ADD_COMPONENT_TYPE = """INSERT IGNORE INTO component_type (type) VALUES ('{text}')
"""
GET_COMPONENT_TYPE="""SELECT * FROM component_type WHERE type='{text}'"""
DELETE_COMPONENT_TYPE = """DELETE FROM component_type WHERE type='{text}'
"""
SELECT_ALL_COMPONENTS = """
SELECT * FROM components INNER JOIN component_type
ON components.type = component_type.id;
"""
# Project SQL
ADD_PROJECT = "INSERT INTO projects (product_number) VALUE ('{text}')"
DELETE_PROJECT = """
DELETE FROM projects WHERE product_number='{text}'
"""
DROP_SQL = """
DROP TABLE projects;
DROP TABLE components;
DROP TABLE component_type;
"""
| python | 1,631 |
import csv
import matplotlib.pyplot as plt
import numpy as np
# Read CSV spikes and weights
spikes = np.loadtxt("spikes.csv", delimiter=",", skiprows=1,
                    dtype={"names": ("time", "neuron_id"),
                           "formats": (float, int)})
weights = np.loadtxt("weights.csv", delimiter=",", skiprows=1,
                     dtype={"names": ("time", "weight"),
                            "formats": (float, float)})
# Create plot
figure, axes = plt.subplots(3, sharex=True)
# Plot spikes
axes[0].scatter(spikes["time"], spikes["neuron_id"], s=2, edgecolors="none")
# Plot rates
bins = np.arange(0, 10000 + 1, 10)
rate = np.histogram(spikes["time"], bins=bins)[0] * (1000.0 / 10.0) * (1.0 / 2000.0)
axes[1].plot(bins[0:-1], rate)
# Plot weight evolution
axes[2].plot(weights["time"], weights["weight"])
axes[0].set_title("Spikes")
axes[1].set_title("Firing rates")
axes[2].set_title("Weight evolution")
axes[0].set_xlim((0, 10000))
axes[0].set_ylim((0, 2000))
axes[0].set_ylabel("Neuron number")
axes[1].set_ylabel("Mean firing rate [Hz]")
axes[2].set_ylabel("Mean I->E weights [nA]")
axes[2].set_xlabel("Time [ms]")
# Show plot
plt.show()
| python | 1,188 |
def to_max(dataframe):
""" Normalization method that finds the max value for each series and sets it to 1, dividing all other values
accordingly.
Useful for viewing curves on top of one another while also forcing them to have the same zero value (so that
proportionality of changes is easy to find).
Args:
dataframe: pandas.DataFrame in which each column will be normalized
Returns:
new DataFrame with normalized columns
"""
return dataframe / dataframe.max()
def by_rgb_sum(rgb_df):
""" Normalize each series in the DataFrame by dividing by the sum of the r, g, and b values in each row
Args:
rgb_df: pandas.DataFrame instance with 'r', 'g', and 'b' columns
Returns
new DataFrame with all columns normalized by r+g+b
"""
normalization_series = rgb_df["r"] + rgb_df["g"] + rgb_df["b"]
return rgb_df.apply(lambda column: column / normalization_series, axis="rows")
def r_over_b(rgb_df):
r_over_b = rgb_df["r"] / rgb_df["b"]
r_over_b.name = "r/b"
return r_over_b
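# --- Minimal usage sketch (added for illustration; not part of the original module). ---
# Builds a tiny RGB DataFrame by hand; the numbers are arbitrary and only serve to show
# what each helper returns.
if __name__ == "__main__":
    import pandas as pd

    rgb = pd.DataFrame({"r": [10.0, 20.0], "g": [5.0, 10.0], "b": [5.0, 10.0]})
    print(to_max(rgb))      # each column scaled so its maximum becomes 1
    print(by_rgb_sum(rgb))  # each row scaled so r + g + b sums to 1
    print(r_over_b(rgb))    # elementwise r/b ratio, returned as a Series named "r/b"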
| python | 1,068 |
# Copyright 2016 The Closure Rules Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for building JavaScript Protocol Buffers.
"""
load("//closure/compiler:closure_js_library.bzl", "closure_js_library")
def _collect_includes(srcs):
includes = ["."]
for src in srcs:
include = ""
if src.startswith("@"):
include = Label(src).workspace_root
if include and not include in includes:
includes += [include]
return includes
def closure_js_proto_library(
name,
srcs,
suppress = [],
add_require_for_enums = 0,
testonly = None,
binary = 1,
import_style = None,
protocbin = Label("@com_google_protobuf//:protoc"),
**kwargs):
cmd = ["$(location %s)" % protocbin]
js_out_options = ["library=%s,error_on_name_conflict" % name]
if add_require_for_enums:
js_out_options += ["add_require_for_enums"]
if testonly:
js_out_options += ["testonly"]
if binary:
js_out_options += ["binary"]
if import_style:
js_out_options += ["import_style=%s" % import_style]
cmd += ["-I%s" % i for i in _collect_includes(srcs)]
cmd += ["--js_out=%s:$(@D)" % ",".join(js_out_options)]
cmd += ["--descriptor_set_out=$(@D)/%s.descriptor" % name]
cmd += ["$(locations " + src + ")" for src in srcs]
native.genrule(
name = name + "_gen",
srcs = srcs,
testonly = testonly,
visibility = ["//visibility:private"],
message = "Generating JavaScript Protocol Buffer file",
outs = [name + ".js", name + ".descriptor"],
tools = [protocbin],
cmd = " ".join(cmd),
)
closure_js_library(
name = name,
srcs = [name + ".js"],
testonly = testonly,
deps = [
str(Label("//closure/library/array")),
str(Label("//closure/protobuf:jspb")),
],
internal_descriptors = [name + ".descriptor"],
suppress = suppress + [
"missingProperties",
"unusedLocalVariables",
],
lenient = True,
**kwargs
)
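# Example BUILD usage (illustrative only; the target name, proto file, and load path are
# hypothetical and depend on where this .bzl file lives in your workspace):
#
#   load("//closure/protobuf:closure_js_proto_library.bzl", "closure_js_proto_library")
#
#   closure_js_proto_library(
#       name = "person_js_proto",
#       srcs = ["person.proto"],
#   )
#
# The macro runs protoc once through a genrule to emit person_js_proto.js plus a
# descriptor file, then wraps the generated source in a closure_js_library of the same
# name.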
| python | 2,683 |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.core.display import HTML
from IPython.display import Image
HTML("""
<style>
.output_png {
display: table-cell;
text-align: center;
vertical-align: middle;
}
</style>
""")
# # *Circuitos Elétricos I - Week 4*
# ## Source transformations
#
# An ideal voltage source of value $V_s$ connected in series with a resistor $R$ can be replaced by an ideal current source of value $I_s$ connected in parallel with a resistance $R$, and vice versa. These substitutions will not change the behavior of the remaining elements of the circuit as long as $V_s=RI_s$.
#
# <img src=./figures/J5C0.png width=600>
#
# ## Source shifting
#
# * **Voltage-source shifting**: connecting ideal voltage sources, with suitable polarities, in series with every ideal two-terminal element attached to a node does not change the equations that describe the behavior of the circuit.
#
# * **Current-source shifting**: connecting a loop of equal ideal current sources with the same orientation to a circuit does not change the equations that describe the behavior of the circuit.
# ### Problem 1
#
# Determine the current through the $25~\Omega$ resistor by applying source transformations and source shifts.
Image("./figures/J5C1.png", width=500)
# Circuit simulation: https://tinyurl.com/ydk42vvn
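# %%
# Worked numerical check (added for illustration; the component values are arbitrary and
# are not taken from the figure): a voltage source Vs in series with R is exchanged for a
# current source Is = Vs/R in parallel with the same R.
Vs = 10.0  # volts
R = 5.0    # ohms
Is = Vs / R
print(f"Equivalent source: Is = {Is:.2f} A in parallel with R = {R:.1f} Ω")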
| python | 1,652 |
import errno
from io import StringIO
from unittest.mock import patch
from django.core.management import call_command
from django.test import TestCase
from aiodjango.management.commands.runserver import Command
class RunserverTestCase(TestCase):
"""Development server command options."""
def setUp(self):
self.stdout = StringIO()
self.stderr = StringIO()
self.cmd = Command(stdout=self.stdout, stderr=self.stderr)
def assert_option(self, name, value):
self.assertEqual(getattr(self.cmd, name), value)
def assert_stderr(self, message):
self.stderr.seek(0)
self.assertIn(message, self.stderr.read())
def test_default_options(self):
"""Deifault options for running the server."""
with patch.object(self.cmd, 'run'):
call_command(self.cmd)
self.assert_option('addr', '127.0.0.1')
self.assert_option('port', '8000')
self.assert_option('use_ipv6', False)
def test_set_ip(self):
"""Run server on another IP address/port."""
with patch.object(self.cmd, 'run'):
call_command(self.cmd, addrport='1.2.3.4:5000')
self.assert_option('addr', '1.2.3.4')
self.assert_option('port', '5000')
self.assert_option('use_ipv6', False)
@patch('asyncio.get_event_loop')
def test_run(self, mock_loop):
"""Running the server should kick off the aiohttp app in the event loop."""
call_command(self.cmd, use_reloader=False)
mock_loop.assert_called_with()
mock_loop.return_value.run_forever.assert_called_with()
@patch('asyncio.set_event_loop')
@patch('asyncio.new_event_loop')
def test_auto_reloader(self, mock_loop, mock_set_loop):
"""Running with the reloader thread creates a new event loop."""
# Need to setup command options and use inner_run to prevent the
# background thread from actually kicking off.
self.cmd.addr = '127.0.0.1'
self.cmd.port = '8000'
self.cmd._raw_ipv6 = False
self.cmd.inner_run(use_reloader=True, use_static_handler=False, insecure_serving=True)
mock_loop.assert_called_with()
mock_set_loop.assert_called_with(mock_loop.return_value)
mock_loop.return_value.run_forever.assert_called_with()
@patch('asyncio.get_event_loop')
def test_handle_general_socket_errors(self, mock_loop):
"""Handle socket errors when createing the server."""
mock_loop.return_value.create_server.side_effect = OSError('OS is broken')
with patch('os._exit') as mock_exit:
call_command(self.cmd, use_reloader=False)
mock_exit.assert_called_with(1)
self.assert_stderr('OS is broken')
@patch('asyncio.get_event_loop')
def test_handle_known_socket_errors(self, mock_loop):
"""Special case socket errors for more meaningful error messages."""
cases = (
(errno.EACCES, 'You don\'t have permission to access that port.'),
(errno.EADDRINUSE, 'That port is already in use.'),
(errno.EADDRNOTAVAIL, 'That IP address can\'t be assigned to.'),
)
for number, message in cases:
error = OSError()
error.errno = number
mock_loop.return_value.create_server.side_effect = error
with patch('os._exit') as mock_exit:
call_command(self.cmd, use_reloader=False)
mock_exit.assert_called_with(1)
self.assert_stderr(message)
@patch('asyncio.get_event_loop')
def test_keyboard_stop(self, mock_loop):
"""User should be able to stop the server with a KeyboardInterrupt."""
mock_loop.return_value.run_forever.side_effect = KeyboardInterrupt
with self.assertRaises(SystemExit):
call_command(self.cmd, use_reloader=False)
| python | 3,862 |
# -*- coding: utf-8 -*-
"""Cisco DNA Center Update Workflow data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidator3086C9624F498B85(object):
"""Update Workflow request schema definition."""
def __init__(self):
super(JSONSchemaValidator3086C9624F498B85, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"properties": {
"_id": {
"type": [
"string",
"null"
]
},
"addToInventory": {
"type": [
"boolean",
"null"
]
},
"addedOn": {
"type": [
"number",
"null"
]
},
"configId": {
"type": [
"string",
"null"
]
},
"currTaskIdx": {
"type": [
"number",
"null"
]
},
"description":
{
"type": [
"string",
"null"
]
},
"endTime": {
"type": [
"number",
"null"
]
},
"execTime": {
"type": [
"number",
"null"
]
},
"imageId": {
"type": [
"string",
"null"
]
},
"instanceType": {
"type": [
"string",
"null"
]
},
"lastupdateOn": {
"type": [
"number",
"null"
]
},
"name": {
"type": [
"string",
"null"
]
},
"startTime": {
"type": [
"number",
"null"
]
},
"state": {
"type": [
"string",
"null"
]
},
"tasks": {
"items": {
"properties": {
"currWorkItemIdx": {
"type": [
"number",
"null"
]
},
"endTime": {
"type": [
"number",
"null"
]
},
"name": {
"type": [
"string",
"null"
]
},
"startTime": {
"type": [
"number",
"null"
]
},
"state": {
"type": [
"string",
"null"
]
},
"taskSeqNo": {
"type": [
"number",
"null"
]
},
"timeTaken": {
"type": [
"number",
"null"
]
},
"type": {
"type": [
"string",
"null"
]
},
"workItemList": {
"items": {
"properties": {
"command": {
"type": [
"string",
"null"
]
},
"endTime": {
"type": [
"number",
"null"
]
},
"outputStr": {
"type": [
"string",
"null"
]
},
"startTime": {
"type": [
"number",
"null"
]
},
"state": {
"type": [
"string",
"null"
]
},
"timeTaken": {
"type": [
"number",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": [
"array",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": [
"array",
"null"
]
},
"tenantId": {
"type": [
"string",
"null"
]
},
"type": {
"type": [
"string",
"null"
]
},
"useState": {
"type": [
"string",
"null"
]
},
"version": {
"type": [
"number",
"null"
]
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
| python | 7,270 |
import re
from abc import abstractmethod, ABCMeta
from collections import defaultdict
from functools import partial
import numpy as np
import tables
from astropy.time import Time
from astropy.units import Quantity
import ctapipe
from ctapipe.core import Component
__all__ = ['TableWriter',
'TableReader',
'HDF5TableWriter',
'HDF5TableReader']
PYTABLES_TYPE_MAP = {
'float': tables.Float64Col,
'float64': tables.Float64Col,
'float32': tables.Float32Col,
'int': tables.IntCol,
'int32': tables.Int32Col,
'int64': tables.Int64Col,
'bool': tables.BoolCol,
}
class TableWriter(Component, metaclass=ABCMeta):
def __init__(self, parent=None, **kwargs):
super().__init__(parent, **kwargs)
self._transforms = defaultdict(dict)
self._exclusions = defaultdict(list)
def exclude(self, table_name, pattern):
"""
Exclude any columns matching the pattern from being written
Parameters
----------
table_name: str
name of table on which to apply the exclusion
pattern: str
regular expression string to match column name
"""
self._exclusions[table_name].append(re.compile(pattern))
def _is_column_excluded(self, table_name, col_name):
for pattern in self._exclusions[table_name]:
if pattern.match(col_name):
return True
return False
def add_column_transform(self, table_name, col_name, transform):
"""
Add a transformation function for a column. This function will be
called on the value in the container before it is written to the
output file.
Parameters
----------
table_name: str
identifier of table being written
col_name: str
name of column in the table (or item in the Container)
transform: callable
            function that takes a value and returns a new one
"""
self._transforms[table_name][col_name] = transform
self.log.debug("Added transform: {}/{} -> {}".format(table_name,
col_name,
transform))
@abstractmethod
def write(self, table_name, container):
"""
Write the contents of the given container to a table. The first call
to write will create a schema and initialize the table within the
file. The shape of data within the container must not change between
calls, since variable-length arrays are not supported.
Parameters
----------
table_name: str
name of table to write to
container: `ctapipe.core.Container`
container to write
"""
pass
def _apply_col_transform(self, table_name, col_name, value):
"""
apply value transform function if it exists for this column
"""
if col_name in self._transforms[table_name]:
tr = self._transforms[table_name][col_name]
value = tr(value)
return value
class HDF5TableWriter(TableWriter):
"""
A very basic table writer that can take a container (or more than one)
and write it to an HDF5 file. It does _not_ recursively write the
container. This is intended as a building block to create a more complex
I/O system.
    It works by creating an HDF5 Table description from the `Field`s inside a
container, where each item becomes a column in the table. The first time
    `HDF5TableWriter.write()` is called, the container is registered
and the table created in the output file.
Each item in the container can also have an optional transform function
that is called before writing to transform the value. For example,
unit quantities always have their units removed, or converted to a
common unit if specified in the `Field`.
Any metadata in the `Container` (stored in `Container.meta`) will be
written to the table's header on the first call to write()
Multiple tables may be written at once in a single file, as long as you
change the table_name attribute to write() to specify which one to write
to.
TODO:
- ability to write several containers to the same table (appending a
string to each column name). Perhaps `write(name, dict(method_a=cont,
method_b=cont2))`, where "method_a_X" would be a column name. May be
possible to do this with some container magic, like joining two
containers `joined_container(cont1, cont2, "A", "B")` or "cont1+cont2".
Perhaps need to provide a better way to get container contents as a
dictionary.
Parameters
----------
filename: str
name of hdf5 output file
group_name: str
name of group into which to put all of the tables generated by this
Writer (it will be placed under "/" in the file)
"""
def __init__(self, filename, group_name, **kwargs):
super().__init__()
self._schemas = {}
self._tables = {}
self._h5file = tables.open_file(filename, mode="w", **kwargs)
self._group = self._h5file.create_group("/", group_name)
self.log.debug("h5file: {}".format(self._h5file))
def __del__(self):
self._h5file.close()
def _create_hdf5_table_schema(self, table_name, container):
"""
Creates a pytables description class for a container
and registers it in the Writer
Parameters
----------
table_name: str
name of table
container: ctapipe.core.Container
            instance of an initialized container
Returns
-------
dictionary of extra metadata to add to the table's header
"""
class Schema(tables.IsDescription):
pass
meta = {} # any extra meta-data generated here (like units, etc)
# create pytables schema description for the given container
for col_name, value in container.items():
typename = ""
shape = 1
if self._is_column_excluded(table_name, col_name):
self.log.debug("excluded column: {}/{}".format(table_name,
col_name))
continue
if isinstance(value, Quantity):
req_unit = container.fields[col_name].unit
if req_unit is not None:
tr = partial(tr_convert_and_strip_unit, unit=req_unit)
meta['{}_UNIT'.format(col_name)] = str(req_unit)
else:
tr = lambda x: x.value
meta['{}_UNIT'.format(col_name)] = str(value.unit)
value = tr(value)
self.add_column_transform(table_name, col_name, tr)
if isinstance(value, np.ndarray):
typename = value.dtype.name
coltype = PYTABLES_TYPE_MAP[typename]
shape = value.shape
Schema.columns[col_name] = coltype(shape=shape)
if isinstance(value, Time):
# TODO: really should use MET, but need a func for that
Schema.columns[col_name] = tables.Float64Col()
self.add_column_transform(table_name, col_name,
tr_time_to_float)
elif type(value).__name__ in PYTABLES_TYPE_MAP:
typename = type(value).__name__
coltype = PYTABLES_TYPE_MAP[typename]
Schema.columns[col_name] = coltype()
self.log.debug("Table {}: added col: {} type: {} shape: {}"
.format(table_name, col_name, typename, shape))
self._schemas[table_name] = Schema
meta['CTAPIPE_VERSION'] = ctapipe.__version__
return meta
def _setup_new_table(self, table_name, container):
""" set up the table. This is called the first time `write()`
is called on a new table """
self.log.debug("Initializing table '{}'".format(table_name))
meta = self._create_hdf5_table_schema(table_name, container)
meta.update(container.meta) # copy metadata from container
table = self._h5file.create_table(where=self._group,
name=table_name,
title="storage of {}".format(
container.__class__.__name__),
description=self._schemas[table_name])
for key, val in meta.items():
table.attrs[key] = val
self._tables[table_name] = table
def _append_row(self, table_name, container):
"""
append a row to an already initialized table. This is called
automatically by `write()`
"""
table = self._tables[table_name]
row = table.row
for colname in table.colnames:
value = self._apply_col_transform(table_name, colname,
container[colname])
row[colname] = value
row.append()
def write(self, table_name, container):
"""
Write the contents of the given container to a table. The first call
to write will create a schema and initialize the table within the
file. The shape of data within the container must not change between
calls, since variable-length arrays are not supported.
Parameters
----------
table_name: str
name of table to write to
container: `ctapipe.core.Container`
container to write
"""
if table_name not in self._schemas:
self._setup_new_table(table_name, container)
self._append_row(table_name, container)
class TableReader(Component, metaclass=ABCMeta):
"""
Base class for row-wise table readers. Generally methods that read a
full table at once are preferred to this method, since they are faster,
but this can be used to re-play a table row by row into a
`ctapipe.core.Container` class (the opposite of TableWriter)
"""
def __init__(self):
super().__init__()
self._cols_to_read = defaultdict(list)
self._transforms = defaultdict(dict)
def add_column_transform(self, table_name, col_name, transform):
"""
        Add a transformation function for a column. This function will be
        called on the value read from the table before it is stored in the
        container.
Parameters
----------
table_name: str
identifier of table being written
col_name: str
name of column in the table (or item in the Container)
transform: callable
            function that takes a value and returns a new one
"""
self._transforms[table_name][col_name] = transform
self.log.debug("Added transform: {}/{} -> {}".format(table_name,
col_name,
transform))
def _apply_col_transform(self, table_name, col_name, value):
"""
apply value transform function if it exists for this column
"""
if col_name in self._transforms[table_name]:
tr = self._transforms[table_name][col_name]
value = tr(value)
return value
@abstractmethod
def read(self, table_name, container):
"""
Returns a generator that reads the next row from the table into the
given container. The generator returns the same container. Note that
no containers are copied, the data are overwritten inside.
"""
pass
class HDF5TableReader(TableReader):
"""
Reader that reads a single row of an HDF5 table at once into a Container.
Simply construct a `HDF5TableReader` with an input HDF5 file,
and call the `read(path, container)` method to get a generator that fills
the given container with a new row of the table on each access.
Columns in the table are automatically mapped to container fields by
name, and if a field is missing in either, it is skipped during read,
but a warning is emitted.
    Columns that were written by HDF5TableWriter and which had unit
transforms applied, will have the units re-applied when reading (the
unit used is stored in the header attributes).
Note that this is only useful if you want to read all information *one
event at a time* into a container, which is not very I/O efficient. For
some other use cases, it may be much more efficient to access the
table data directly, for example to read an entire column or table at
once (which means not using the Container data structure).
Todo:
- add ability to synchronize reading of multiple tables on a key
- add ability (also with TableWriter) to read a row into n containers at
once, assuming no naming conflicts (so we can add e.g. event_id)
"""
def __init__(self, filename):
"""
Parameters
----------
filename: str
name of hdf5 file
"""
super().__init__()
self._tables = {}
self._h5file = tables.open_file(filename)
def _setup_table(self, table_name, container):
tab = self._h5file.get_node(table_name)
self._tables[table_name] = tab
self._map_table_to_container(table_name, container)
self._map_transforms_from_table_header(table_name)
return tab
def _map_transforms_from_table_header(self, table_name):
"""
create any transforms needed to "undo" ones in the writer
"""
tab = self._tables[table_name]
for attr in tab.attrs._f_list():
if attr.endswith("_UNIT"):
colname = attr[:-5]
tr = partial(tr_add_unit, unitname=tab.attrs[attr])
self.add_column_transform(table_name, colname, tr)
def _map_table_to_container(self, table_name, container):
""" identifies which columns in the table to read into the container,
by comparing their names."""
tab = self._tables[table_name]
for colname in tab.colnames:
if colname in container.fields:
self._cols_to_read[table_name].append(colname)
else:
self.log.warn("Table '{}' has column '{}' that is not in "
"container {}. It will be skipped"
.format(table_name, colname,
container.__class__.__name__))
# also check that the container doesn't have fields that are not
# in the table:
for colname in container.fields:
if colname not in self._cols_to_read[table_name]:
self.log.warn("Table '{}' is missing column '{}' that is "
"in container {}. It will be skipped"
.format(table_name, colname,
container.__class__.__name__))
        # copy all user-defined attributes back to Container.meta
for key in tab.attrs._f_list():
container.meta[key] = tab.attrs[key]
def read(self, table_name, container):
"""
Returns a generator that reads the next row from the table into the
given container. The generator returns the same container. Note that
no containers are copied, the data are overwritten inside.
"""
if table_name not in self._tables:
tab = self._setup_table(table_name, container)
else:
tab = self._tables[table_name]
row_count = 0
while 1:
try:
row = tab[row_count]
except IndexError:
return # stop generator when done
for colname in self._cols_to_read[table_name]:
container[colname] = self._apply_col_transform(table_name,
colname,
row[colname])
yield container
row_count += 1
def tr_convert_and_strip_unit(quantity, unit):
return quantity.to(unit).value
def tr_list_to_mask(thelist, length):
""" transform list to a fixed-length mask"""
    arr = np.zeros(shape=length, dtype=bool)
arr[thelist] = True
return arr
def tr_time_to_float(thetime):
return thetime.mjd
def tr_add_unit(value, unitname):
return Quantity(value, unitname)
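# --- Minimal usage sketch (added for illustration; not part of the original module). ---
# It assumes ctapipe's Container/Field API; the container, file, and table names are
# hypothetical. Each write() call appends one row; the reader replays the rows back into
# the same container type.
if __name__ == "__main__":
    from ctapipe.core import Container, Field

    class ExampleContainer(Container):
        x = Field(0.0, "an example float value")
        n = Field(0, "an example integer counter")

    writer = HDF5TableWriter("example.h5", group_name="data")
    event = ExampleContainer()
    for i in range(5):
        event.x = 0.5 * i
        event.n = i
        writer.write("events", event)
    del writer  # the writer closes its HDF5 file in __del__

    reader = HDF5TableReader("example.h5")
    for row in reader.read("/data/events", ExampleContainer()):
        print(row.x, row.n)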
| python | 16,910 |
"""
Mask R-CNN
Display and Visualization Functions.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import sys
import random
import itertools
import colorsys
import numpy as np
from skimage.measure import find_contours
from skimage import io
import matplotlib.pyplot as plt
from matplotlib import patches, lines
from matplotlib.patches import Polygon
import IPython.display
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
############################################################
# Visualization
############################################################
def display_images(images, titles=None, cols=4, cmap=None, norm=None,
interpolation=None):
"""Display the given set of images, optionally with titles.
images: list or array of image tensors in HWC format.
titles: optional. A list of titles to display with each image.
cols: number of images per row
cmap: Optional. Color map to use. For example, "Blues".
norm: Optional. A Normalize instance to map values to colors.
interpolation: Optional. Image interpolation to use for display.
"""
titles = titles if titles is not None else [""] * len(images)
rows = len(images) // cols + 1
plt.figure(figsize=(14, 14 * rows // cols))
i = 1
for image, title in zip(images, titles):
plt.subplot(rows, cols, i)
plt.title(title, fontsize=9)
plt.axis('off')
plt.imshow(image.astype(np.uint8), cmap=cmap,
norm=norm, interpolation=interpolation)
i += 1
plt.show()
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
def display_instances(image, boxes, masks, class_ids, class_names,
scores=None, title="",
figsize=(16, 16), ax=None,
show_mask=True, show_bbox=True,
colors=None, captions=None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [height, width, num_instances]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
title: (optional) Figure title
show_mask, show_bbox: To show masks and bounding boxes or not
figsize: (optional) the size of the image
colors: (optional) An array or colors to use with each object
captions: (optional) A list of strings to use as captions for each object
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
# If no axis is passed, create one and automatically call show()
auto_show = False
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
auto_show = True
# Generate random colors
colors = colors or random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
if show_bbox:
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
if not captions:
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = class_names[class_id]
caption = "{} {:.3f}".format(label, score) if score else label
else:
caption = captions[i]
ax.text(x1, y1 + 8, caption,
color='w', size=11, backgroundcolor="none")
# Mask
mask = masks[:, :, i]
if show_mask:
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
ax.imshow(masked_image.astype(np.uint8))
if auto_show:
plt.savefig(r"C:\Users\Bang\Desktop\CMNDDetection\Mask-RCNN-CMND")
plt.show()
def display_differences(image,
gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
class_names, title="", ax=None,
show_mask=True, show_box=True,
iou_threshold=0.5, score_threshold=0.5):
"""Display ground truth and prediction instances on the same image."""
# Match predictions to ground truth
gt_match, pred_match, overlaps = utils.compute_matches(
gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_threshold=iou_threshold, score_threshold=score_threshold)
# Ground truth = green. Predictions = red
colors = [(0, 1, 0, .8)] * len(gt_match)\
+ [(1, 0, 0, 1)] * len(pred_match)
# Concatenate GT and predictions
class_ids = np.concatenate([gt_class_id, pred_class_id])
scores = np.concatenate([np.zeros([len(gt_match)]), pred_score])
boxes = np.concatenate([gt_box, pred_box])
masks = np.concatenate([gt_mask, pred_mask], axis=-1)
# Captions per instance show score/IoU
captions = ["" for m in gt_match] + ["{:.2f} / {:.2f}".format(
pred_score[i],
(overlaps[i, int(pred_match[i])]
if pred_match[i] > -1 else overlaps[i].max()))
for i in range(len(pred_match))]
# Set title if not provided
title = title or "Ground Truth and Detections\n GT=green, pred=red, captions: score/IoU"
# Display
display_instances(
image,
boxes, masks, class_ids,
class_names, scores, ax=ax,
show_bbox=show_box, show_mask=show_mask,
colors=colors, captions=captions,
title=title)
def draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):
"""
anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.
proposals: [n, 4] the same anchors but refined to fit objects better.
"""
masked_image = image.copy()
# Pick random anchors in case there are too many.
ids = np.arange(rois.shape[0], dtype=np.int32)
ids = np.random.choice(
ids, limit, replace=False) if ids.shape[0] > limit else ids
fig, ax = plt.subplots(1, figsize=(12, 12))
if rois.shape[0] > limit:
plt.title("Showing {} random ROIs out of {}".format(
len(ids), rois.shape[0]))
else:
plt.title("{} ROIs".format(len(ids)))
# Show area outside image boundaries.
ax.set_ylim(image.shape[0] + 20, -20)
ax.set_xlim(-50, image.shape[1] + 20)
ax.axis('off')
for i, id in enumerate(ids):
color = np.random.rand(3)
class_id = class_ids[id]
# ROI
y1, x1, y2, x2 = rois[id]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
edgecolor=color if class_id else "gray",
facecolor='none', linestyle="dashed")
ax.add_patch(p)
# Refined ROI
if class_id:
ry1, rx1, ry2, rx2 = refined_rois[id]
p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal for easy visualization
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Label
label = class_names[class_id]
ax.text(rx1, ry1 + 8, "{}".format(label),
color='w', size=11, backgroundcolor="none")
# Mask
m = utils.unmold_mask(mask[id], rois[id]
[:4].astype(np.int32), image.shape)
masked_image = apply_mask(masked_image, m, color)
ax.imshow(masked_image)
# Print stats
print("Positive ROIs: ", class_ids[class_ids > 0].shape[0])
print("Negative ROIs: ", class_ids[class_ids == 0].shape[0])
print("Positive Ratio: {:.2f}".format(
class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))
# TODO: Replace with matplotlib equivalent?
def draw_box(image, box, color):
"""Draw 3-pixel width bounding boxes on the given image array.
color: list of 3 int values for RGB.
"""
y1, x1, y2, x2 = box
image[y1:y1 + 2, x1:x2] = color
image[y2:y2 + 2, x1:x2] = color
image[y1:y2, x1:x1 + 2] = color
image[y1:y2, x2:x2 + 2] = color
return image
def display_top_masks(image, mask, class_ids, class_names, limit=4):
"""Display the given image and the top few class masks."""
to_display = []
titles = []
to_display.append(image)
titles.append("H x W={}x{}".format(image.shape[0], image.shape[1]))
# Pick top prominent classes in this image
unique_class_ids = np.unique(class_ids)
mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])
for i in unique_class_ids]
top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),
key=lambda r: r[1], reverse=True) if v[1] > 0]
# Generate images and titles
for i in range(limit):
class_id = top_ids[i] if i < len(top_ids) else -1
# Pull masks of instances belonging to the same class.
m = mask[:, :, np.where(class_ids == class_id)[0]]
m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)
to_display.append(m)
titles.append(class_names[class_id] if class_id != -1 else "-")
display_images(to_display, titles=titles, cols=limit + 1, cmap="Blues_r")
def plot_precision_recall(AP, precisions, recalls):
"""Draw the precision-recall curve.
AP: Average precision at IoU >= 0.5
precisions: list of precision values
recalls: list of recall values
"""
# Plot the Precision-Recall curve
_, ax = plt.subplots(1)
ax.set_title("Precision-Recall Curve. AP@50 = {:.3f}".format(AP))
ax.set_ylim(0, 1.1)
ax.set_xlim(0, 1.1)
_ = ax.plot(recalls, precisions)
def plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,
overlaps, class_names, threshold=0.5):
"""Draw a grid showing how ground truth objects are classified.
gt_class_ids: [N] int. Ground truth class IDs
pred_class_id: [N] int. Predicted class IDs
pred_scores: [N] float. The probability scores of predicted classes
overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.
class_names: list of all class names in the dataset
threshold: Float. The prediction probability required to predict a class
"""
gt_class_ids = gt_class_ids[gt_class_ids != 0]
pred_class_ids = pred_class_ids[pred_class_ids != 0]
plt.figure(figsize=(12, 10))
plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)
plt.yticks(np.arange(len(pred_class_ids)),
["{} ({:.2f})".format(class_names[int(id)], pred_scores[i])
for i, id in enumerate(pred_class_ids)])
plt.xticks(np.arange(len(gt_class_ids)),
[class_names[int(id)] for id in gt_class_ids], rotation=90)
thresh = overlaps.max() / 2.
for i, j in itertools.product(range(overlaps.shape[0]),
range(overlaps.shape[1])):
text = ""
if overlaps[i, j] > threshold:
text = "match" if gt_class_ids[j] == pred_class_ids[i] else "wrong"
color = ("white" if overlaps[i, j] > thresh
else "black" if overlaps[i, j] > 0
else "grey")
plt.text(j, i, "{:.3f}\n{}".format(overlaps[i, j], text),
horizontalalignment="center", verticalalignment="center",
fontsize=9, color=color)
plt.tight_layout()
plt.xlabel("Ground Truth")
plt.ylabel("Predictions")
def draw_boxes(image, boxes=None, refined_boxes=None,
masks=None, captions=None, visibilities=None,
title="", ax=None):
"""Draw bounding boxes and segmentation masks with different
customizations.
boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.
refined_boxes: Like boxes, but draw with solid lines to show
that they're the result of refining 'boxes'.
masks: [N, height, width]
captions: List of N titles to display on each box
visibilities: (optional) List of values of 0, 1, or 2. Determine how
prominent each bounding box should be.
title: An optional title to show over the image
ax: (optional) Matplotlib axis to draw on.
"""
# Number of boxes
assert boxes is not None or refined_boxes is not None
N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]
# Matplotlib Axis
if not ax:
_, ax = plt.subplots(1, figsize=(12, 12))
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
margin = image.shape[0] // 10
ax.set_ylim(image.shape[0] + margin, -margin)
ax.set_xlim(-margin, image.shape[1] + margin)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
# Box visibility
visibility = visibilities[i] if visibilities is not None else 1
if visibility == 0:
color = "gray"
style = "dotted"
alpha = 0.5
elif visibility == 1:
color = colors[i]
style = "dotted"
alpha = 1
elif visibility == 2:
color = colors[i]
style = "solid"
alpha = 1
# Boxes
if boxes is not None:
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in cropping.
continue
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=alpha, linestyle=style,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Refined boxes
if refined_boxes is not None and visibility > 0:
ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)
p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal
if boxes is not None:
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Captions
if captions is not None:
caption = captions[i]
# If there are refined boxes, display captions on them
if refined_boxes is not None:
y1, x1, y2, x2 = ry1, rx1, ry2, rx2
ax.text(x1, y1, caption, size=11, verticalalignment='top',
color='w', backgroundcolor="none",
bbox={'facecolor': color, 'alpha': 0.5,
'pad': 2, 'edgecolor': 'none'})
# Masks
if masks is not None:
mask = masks[:, :, i]
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
ax.imshow(masked_image.astype(np.uint8))
def display_table(table):
"""Display values in a table format.
table: an iterable of rows, and each row is an iterable of values.
"""
html = ""
for row in table:
row_html = ""
for col in row:
row_html += "<td>{:40}</td>".format(str(col))
html += "<tr>" + row_html + "</tr>"
html = "<table>" + html + "</table>"
IPython.display.display(IPython.display.HTML(html))
def display_weight_stats(model):
"""Scans all the weights in the model and returns a list of tuples
that contain stats about each weight.
"""
layers = model.get_trainable_layers()
table = [["WEIGHT NAME", "SHAPE", "MIN", "MAX", "STD"]]
for l in layers:
weight_values = l.get_weights() # list of Numpy arrays
weight_tensors = l.weights # list of TF tensors
for i, w in enumerate(weight_values):
weight_name = weight_tensors[i].name
# Detect problematic layers. Exclude biases of conv layers.
alert = ""
if w.min() == w.max() and not (l.__class__.__name__ == "Conv2D" and i == 1):
alert += "<span style='color:red'>*** dead?</span>"
if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:
alert += "<span style='color:red'>*** Overflow?</span>"
# Add row
table.append([
weight_name + alert,
str(w.shape),
"{:+9.4f}".format(w.min()),
"{:+10.4f}".format(w.max()),
"{:+9.4f}".format(w.std()),
])
display_table(table)
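# --- Minimal usage sketch (added for illustration; not part of the original module). ---
# Calls display_instances() on synthetic data so no trained model is needed; the box,
# mask, and class names are made up. An explicit axis is passed so the function's
# internal save/show branch is not triggered, and the module-level numpy/matplotlib
# imports are reused.
if __name__ == "__main__":
    fake_image = np.zeros((128, 128, 3), dtype=np.uint8)
    fake_boxes = np.array([[20, 20, 90, 100]])      # one box: y1, x1, y2, x2
    fake_masks = np.zeros((128, 128, 1), dtype=bool)
    fake_masks[30:80, 30:90, 0] = True
    _, ax = plt.subplots(1, figsize=(6, 6))
    display_instances(fake_image, fake_boxes, fake_masks,
                      class_ids=np.array([1]),
                      class_names=["BG", "object"],
                      scores=np.array([0.87]),
                      ax=ax)
    plt.show()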
| python | 19,072 |
import unittest
import numpy as np
import tensorflow as tf
from autoencoder_tf.encoder import Network
from autoencoder_tf.utils import batch_generator
class TestMlp(unittest.TestCase):
def test_weights_shapes(self):
mlp = Network(32)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
self.assertEqual(mlp.hidden_weights.get_shape(), tf.TensorShape([tf.Dimension(32), tf.Dimension(784)]))
self.assertEqual(mlp.output_weights.get_shape(), tf.TensorShape([tf.Dimension(784), tf.Dimension(32)]))
def test_bias_shapes(self):
mlp = Network(32)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
self.assertEqual(mlp.hidden_bias.get_shape(), tf.TensorShape([tf.Dimension(32), tf.Dimension(1)]))
self.assertEqual(mlp.output_bias.get_shape(), tf.TensorShape([tf.Dimension(784), tf.Dimension(1)]))
def test_feed_forward_single_sample(self):
mlp = Network(32)
data = np.random.rand(784, 1)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
hidden_net, hidden_activation, hidden_activation_derivative, \
output_net, output_activation, output_activation_derivative = sess.run([mlp.hidden_net,
mlp.hidden_activation,
mlp.hidden_activation_derivative,
mlp.output_net,
mlp.output_activation,
mlp.output_activation_derivative],
feed_dict={mlp.input: data})
self.assertEqual(hidden_net.shape, (32, 1))
self.assertEqual(hidden_activation.shape, (32, 1))
self.assertEqual(hidden_activation_derivative.shape, (32, 1))
self.assertEqual(output_net.shape, (784, 1))
self.assertEqual(output_activation.shape, (784, 1))
self.assertEqual(output_activation_derivative.shape, (784, 1))
def test_feed_forward_batch_sample(self):
mlp = Network(32)
data = np.random.rand(784, 200)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
hidden_net, hidden_activation, hidden_activation_derivative, \
output_net, output_activation, output_activation_derivative = sess.run([mlp.hidden_net,
mlp.hidden_activation,
mlp.hidden_activation_derivative,
mlp.output_net,
mlp.output_activation,
mlp.output_activation_derivative],
feed_dict={mlp.input: data})
self.assertEqual(hidden_net.shape, (32, 200))
self.assertEqual(hidden_activation.shape, (32, 200))
self.assertEqual(hidden_activation_derivative.shape, (32, 200))
self.assertEqual(output_net.shape, (784, 200))
self.assertEqual(output_activation.shape, (784, 200))
self.assertEqual(output_activation_derivative.shape, (784, 200))
def test_propagate_backward_single_sample(self):
mlp = Network(32)
data = np.random.rand(784, 1)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
output_error, hidden_error = sess.run([mlp.output_error, mlp.hidden_error], feed_dict={mlp.input: data})
self.assertEqual(output_error.shape, (784, 1))
self.assertEqual(hidden_error.shape, (32, 1))
def test_propagate_backward_batch_sample(self):
mlp = Network(32)
data = np.random.rand(784, 200)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
output_error, hidden_error = sess.run([mlp.output_error, mlp.hidden_error], feed_dict={mlp.input: data})
self.assertEqual(output_error.shape, (784, 200))
self.assertEqual(hidden_error.shape, (32, 200))
def test_calculate_gradients_single_sample(self):
mlp = Network(32)
data = np.random.rand(784, 1)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
output_weights_gradient, hidden_weights_gradient, output_bias_gradient, hidden_bias_gradient = \
sess.run([mlp.output_weights_gradient, mlp.hidden_weights_gradient,
mlp.output_bias_gradient, mlp.hidden_bias_gradient], feed_dict={mlp.input: data})
self.assertEqual(output_weights_gradient.shape, (784, 32))
self.assertEqual(hidden_weights_gradient.shape, (32, 784))
self.assertEqual(output_bias_gradient.shape, (784, 1))
self.assertEqual(hidden_bias_gradient.shape, (32, 1))
def test_calculate_gradients_batch_sample(self):
mlp = Network(32)
data = np.random.rand(784, 200)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
output_weights_gradient, hidden_weights_gradient, output_bias_gradient, hidden_bias_gradient = \
sess.run([mlp.output_weights_gradient, mlp.hidden_weights_gradient,
mlp.output_bias_gradient, mlp.hidden_bias_gradient], feed_dict={mlp.input: data})
self.assertEqual(output_weights_gradient.shape, (784, 32))
self.assertEqual(hidden_weights_gradient.shape, (32, 784))
self.assertEqual(output_bias_gradient.shape, (784, 1))
self.assertEqual(hidden_bias_gradient.shape, (32, 1))
def test_batch_generator_single_sample(self):
data = np.random.rand(784, 200)
batch_size = 1
for batch in batch_generator(data, batch_size):
self.assertEqual(batch.shape, (784, 1))
def test_batch_generator_batch_sample(self):
data = np.random.rand(784, 200)
batch_size = 64
for batch in batch_generator(data, batch_size):
self.assertEqual(batch.shape, (784, 64))
def test_train_single_sample(self):
mlp = Network(32)
data = np.random.rand(784, 200)
batch_size = 1
learning_rate = 0.01
epochs = 2
l2 = 0.0001
# mlp.train(data, learning_rate, epochs, batch_size, l2)
def test_train_batch_sample(self):
mlp = Network(32)
data = np.random.rand(784, 200)
batch_size = 2
learning_rate = 0.01
epochs = 2
l2 = 0.0001
# mlp.train(data, learning_rate, epochs, batch_size, l2)
if __name__ == '__main__':
unittest.main()
| python | 7,489 |
def convert_fasta_to_string(filename):
"""Takes a genome FASTA and outputs a string of that genome
Args:
filename: fasta file
Returns:
string of the genome sequence
"""
assert filename.split('.')[-1] == 'fasta' # assert correct file type
with open(filename) as f:
sequence = ''.join(f.read().split('\n')[1:]).lower() # splits by lines, removes first line, joins lines
return sequence
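# --- Minimal usage sketch (added for illustration; not part of the original file). ---
# Writes a tiny made-up FASTA record to disk and reads it back.
if __name__ == "__main__":
    with open("example.fasta", "w") as handle:
        handle.write(">chr_demo\nACGTAC\nGTTAGC\n")
    print(convert_fasta_to_string("example.fasta"))  # -> "acgtacgttagc"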
| python | 438 |
# A program that computes the double, the triple, and the square root of a number
n = int(input('Enter a number... '))
print('The number you chose is {}, its double is {}, its triple is {}, and its square root is {:.2f}.'.format(n, n * 2, n * 3, pow(n, 1 / 2))) | python | 252 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'print_options06.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
'xl/worksheets/_rels/sheet1.xml.rels']
self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with a print areaand a repeat rows"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.print_area('A1:G20')
worksheet.repeat_rows(0)
worksheet.write('A1', 'Foo')
workbook.close()
self.assertExcelEqual()
| python | 1,375 |
#
# The Python Imaging Library
# $Id$
#
# JPEG2000 file handling
#
# History:
# 2014-03-12 ajh Created
#
# Copyright (c) 2014 Coriolis Systems Limited
# Copyright (c) 2014 Alastair Houghton
#
# See the README file for information on usage and redistribution.
#
import io
import os
import struct
from . import Image, ImageFile
def _parse_codestream(fp):
"""Parse the JPEG 2000 codestream to extract the size and component
count from the SIZ marker segment, returning a PIL (size, mode) tuple."""
hdr = fp.read(2)
lsiz = struct.unpack(">H", hdr)[0]
siz = hdr + fp.read(lsiz - 2)
lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, _, _, _, _, csiz = struct.unpack_from(
">HHIIIIIIIIH", siz
)
ssiz = [None] * csiz
xrsiz = [None] * csiz
yrsiz = [None] * csiz
for i in range(csiz):
ssiz[i], xrsiz[i], yrsiz[i] = struct.unpack_from(">BBB", siz, 36 + 3 * i)
size = (xsiz - xosiz, ysiz - yosiz)
if csiz == 1:
if (yrsiz[0] & 0x7F) > 8:
mode = "I;16"
else:
mode = "L"
elif csiz == 2:
mode = "LA"
elif csiz == 3:
mode = "RGB"
elif csiz == 4:
mode = "RGBA"
else:
mode = None
return (size, mode)
def _parse_jp2_header(fp):
"""Parse the JP2 header box to extract size, component count and
color space information, returning a (size, mode, mimetype) tuple."""
# Find the JP2 header box
header = None
mimetype = None
while True:
lbox, tbox = struct.unpack(">I4s", fp.read(8))
if lbox == 1:
lbox = struct.unpack(">Q", fp.read(8))[0]
hlen = 16
else:
hlen = 8
if lbox < hlen:
raise SyntaxError("Invalid JP2 header length")
if tbox == b"jp2h":
header = fp.read(lbox - hlen)
break
elif tbox == b"ftyp":
if fp.read(4) == b"jpx ":
mimetype = "image/jpx"
fp.seek(lbox - hlen - 4, os.SEEK_CUR)
else:
fp.seek(lbox - hlen, os.SEEK_CUR)
if header is None:
raise SyntaxError("could not find JP2 header")
size = None
mode = None
bpc = None
nc = None
hio = io.BytesIO(header)
while True:
lbox, tbox = struct.unpack(">I4s", hio.read(8))
if lbox == 1:
lbox = struct.unpack(">Q", hio.read(8))[0]
hlen = 16
else:
hlen = 8
content = hio.read(lbox - hlen)
if tbox == b"ihdr":
height, width, nc, bpc, c, unkc, ipr = struct.unpack(">IIHBBBB", content)
size = (width, height)
if unkc:
if nc == 1 and (bpc & 0x7F) > 8:
mode = "I;16"
elif nc == 1:
mode = "L"
elif nc == 2:
mode = "LA"
elif nc == 3:
mode = "RGB"
elif nc == 4:
mode = "RGBA"
break
elif tbox == b"colr":
meth, prec, approx = struct.unpack_from(">BBB", content)
if meth == 1:
cs = struct.unpack_from(">I", content, 3)[0]
if cs == 16: # sRGB
if nc == 1 and (bpc & 0x7F) > 8:
mode = "I;16"
elif nc == 1:
mode = "L"
elif nc == 3:
mode = "RGB"
elif nc == 4:
mode = "RGBA"
break
elif cs == 17: # grayscale
if nc == 1 and (bpc & 0x7F) > 8:
mode = "I;16"
elif nc == 1:
mode = "L"
elif nc == 2:
mode = "LA"
break
elif cs == 18: # sYCC
if nc == 3:
mode = "RGB"
elif nc == 4:
mode = "RGBA"
break
if size is None or mode is None:
raise SyntaxError("Malformed jp2 header")
return (size, mode, mimetype)
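# Added note (not in the original module): both loops above walk ISO base
# media file format boxes. Each box starts with a 32-bit length (lbox) and a
# 4-byte type (tbox); a length of 1 means the real size follows as a 64-bit
# extended field, which is why hlen switches between 8 and 16. The 'ihdr' box
# supplies size, component count and bit depth, while the 'colr' box maps the
# enumerated colour space (16 = sRGB, 17 = greyscale, 18 = sYCC) to a Pillow
# mode.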
##
# Image plugin for JPEG2000 images.
class Jpeg2KImageFile(ImageFile.ImageFile):
format = "JPEG2000"
format_description = "JPEG 2000 (ISO 15444)"
def _open(self):
sig = self.fp.read(4)
if sig == b"\xff\x4f\xff\x51":
self.codec = "j2k"
self._size, self.mode = _parse_codestream(self.fp)
else:
sig = sig + self.fp.read(8)
if sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a":
self.codec = "jp2"
header = _parse_jp2_header(self.fp)
self._size, self.mode, self.custom_mimetype = header
else:
raise SyntaxError("not a JPEG 2000 file")
if self.size is None or self.mode is None:
raise SyntaxError("unable to determine size/mode")
self._reduce = 0
self.layers = 0
fd = -1
length = -1
try:
fd = self.fp.fileno()
length = os.fstat(fd).st_size
except Exception:
fd = -1
try:
pos = self.fp.tell()
self.fp.seek(0, io.SEEK_END)
length = self.fp.tell()
self.fp.seek(pos)
except Exception:
length = -1
self.tile = [
(
"jpeg2k",
(0, 0) + self.size,
0,
(self.codec, self._reduce, self.layers, fd, length),
)
]
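        # The tile entry uses Pillow's (decoder, extents, offset, args)
        # convention: "jpeg2k" names the native decoder, the extents cover the
        # full image, and args carries the codec kind, reduce level, layer
        # count and the file-descriptor/length hints gathered above.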
@property
def reduce(self):
# https://github.com/python-pillow/Pillow/issues/4343 found that the
# new Image 'reduce' method was shadowed by this plugin's 'reduce'
# property. This attempts to allow for both scenarios
return self._reduce or super().reduce
@reduce.setter
def reduce(self, value):
self._reduce = value
def load(self):
if self.tile and self._reduce:
power = 1 << self._reduce
adjust = power >> 1
self._size = (
int((self.size[0] + adjust) / power),
int((self.size[1] + adjust) / power),
)
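            # Worked example (added comment): with _reduce == 2, power is 4 and
            # adjust is 2, so a 1000x603 source becomes (int(1002/4), int(605/4))
            # = (250, 151); adding adjust before dividing rounds to the nearest
            # integer rather than always rounding down.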
# Update the reduce and layers settings
t = self.tile[0]
t3 = (t[3][0], self._reduce, self.layers, t[3][3], t[3][4])
self.tile = [(t[0], (0, 0) + self.size, t[2], t3)]
return ImageFile.ImageFile.load(self)
def _accept(prefix):
    return (
        prefix[:4] == b"\xff\x4f\xff\x51"  # raw codestream: SOC + SIZ markers
        or prefix[:12] == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a"  # JP2 signature box
    )
# ------------------------------------------------------------
# Save support
def _save(im, fp, filename):
if filename.endswith(".j2k"):
kind = "j2k"
else:
kind = "jp2"
# Get the keyword arguments
info = im.encoderinfo
offset = info.get("offset", None)
tile_offset = info.get("tile_offset", None)
tile_size = info.get("tile_size", None)
quality_mode = info.get("quality_mode", "rates")
quality_layers = info.get("quality_layers", None)
if quality_layers is not None and not (
isinstance(quality_layers, (list, tuple))
and all(
[
isinstance(quality_layer, (int, float))
for quality_layer in quality_layers
]
)
):
raise ValueError("quality_layers must be a sequence of numbers")
num_resolutions = info.get("num_resolutions", 0)
cblk_size = info.get("codeblock_size", None)
precinct_size = info.get("precinct_size", None)
irreversible = info.get("irreversible", False)
progression = info.get("progression", "LRCP")
cinema_mode = info.get("cinema_mode", "no")
fd = -1
if hasattr(fp, "fileno"):
try:
fd = fp.fileno()
except Exception:
fd = -1
im.encoderconfig = (
offset,
tile_offset,
tile_size,
quality_mode,
quality_layers,
num_resolutions,
cblk_size,
precinct_size,
irreversible,
progression,
cinema_mode,
fd,
)
ImageFile._save(im, fp, [("jpeg2k", (0, 0) + im.size, 0, kind)])
# ------------------------------------------------------------
# Registry stuff
Image.register_open(Jpeg2KImageFile.format, Jpeg2KImageFile, _accept)
Image.register_save(Jpeg2KImageFile.format, _save)
Image.register_extensions(
Jpeg2KImageFile.format, [".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c"]
)
Image.register_mime(Jpeg2KImageFile.format, "image/jp2")
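# Illustrative usage sketch (added; not part of the original plugin). It
# assumes Pillow was built with OpenJPEG support and that "input.jp2" exists;
# all filenames and parameter values are examples only. The block runs only
# when this module is executed directly (e.g. with python -m), never on a
# normal import.
if __name__ == "__main__":
    from PIL import Image as PILImage

    with PILImage.open("input.jp2") as im:
        im.reduce = 1  # decode at half resolution via the property defined above
        im.load()
        im.save(
            "output.j2k",
            quality_mode="rates",        # interpret quality_layers as compression rates
            quality_layers=[20, 10, 5],  # three progressive quality layers
            num_resolutions=6,
            irreversible=True,           # lossy 9/7 wavelet transform
        )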
| python | 9,036 |