import unittest
from hashlib import md5 as basic_md5
from flask import Flask
from flask_httpauth import HTTPDigestAuth
from werkzeug.http import parse_dict_header
def md5(s):
if isinstance(s, str):
s = s.encode('utf-8')
return basic_md5(s)
def get_ha1(user, pw, realm):
a1 = user + ":" + realm + ":" + pw
return md5(a1).hexdigest()
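# Illustration of the helper above (realm value is hypothetical): per RFC 2617,
# HA1 = MD5(username ":" realm ":" password), so
#   get_ha1('susan', 'hello', 'example.com') == md5('susan:example.com:hello').hexdigest()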
class HTTPAuthTestCase(unittest.TestCase):
def setUp(self):
app = Flask(__name__)
app.config['SECRET_KEY'] = 'my secret'
digest_auth_ha1_pw = HTTPDigestAuth(use_ha1_pw=True)
@digest_auth_ha1_pw.get_password
def get_digest_password(username):
if username == 'susan':
return get_ha1(username, 'hello', digest_auth_ha1_pw.realm)
elif username == 'john':
return get_ha1(username, 'bye', digest_auth_ha1_pw.realm)
else:
return None
@app.route('/')
def index():
return 'index'
@app.route('/digest_ha1_pw')
@digest_auth_ha1_pw.login_required
def digest_auth_ha1_pw_route():
return 'digest_auth_ha1_pw:' + digest_auth_ha1_pw.username()
self.app = app
self.client = app.test_client()
def test_digest_ha1_pw_auth_login_valid(self):
response = self.client.get('/digest_ha1_pw')
self.assertEqual(response.status_code, 401)
header = response.headers.get('WWW-Authenticate')
auth_type, auth_info = header.split(None, 1)
d = parse_dict_header(auth_info)
a1 = 'john:' + d['realm'] + ':bye'
ha1 = md5(a1).hexdigest()
a2 = 'GET:/digest_ha1_pw'
ha2 = md5(a2).hexdigest()
a3 = ha1 + ':' + d['nonce'] + ':' + ha2
auth_response = md5(a3).hexdigest()
response = self.client.get(
'/digest_ha1_pw', headers={
'Authorization': 'Digest username="john",realm="{0}",'
'nonce="{1}",uri="/digest_ha1_pw",'
'response="{2}",'
'opaque="{3}"'.format(d['realm'],
d['nonce'],
auth_response,
d['opaque'])})
self.assertEqual(response.data, b'digest_auth_ha1_pw:john')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('password', models.CharField(verbose_name='password', max_length=128)),
('last_login', models.DateTimeField(verbose_name='last login', blank=True, null=True)),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(verbose_name='email address', unique=True, max_length=254)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(verbose_name='date joined', default=django.utils.timezone.now)),
('groups', models.ManyToManyField(to='auth.Group', help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', verbose_name='groups', blank=True, related_query_name='user')),
('user_permissions', models.ManyToManyField(to='auth.Permission', help_text='Specific permissions for this user.', related_name='user_set', verbose_name='user permissions', blank=True, related_query_name='user')),
],
options={
'abstract': False,
},
),
]
|
# -*- test-case-name: txweb2.test.test_log -*-
##
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
"""
Default error output filter for txweb2.
"""
from txweb2 import stream, http_headers
from txweb2.responsecode import (
MOVED_PERMANENTLY, FOUND, SEE_OTHER, USE_PROXY, TEMPORARY_REDIRECT,
BAD_REQUEST, UNAUTHORIZED, PAYMENT_REQUIRED, FORBIDDEN, NOT_FOUND,
NOT_ALLOWED, NOT_ACCEPTABLE, PROXY_AUTH_REQUIRED, REQUEST_TIMEOUT, CONFLICT,
GONE, LENGTH_REQUIRED, PRECONDITION_FAILED, REQUEST_ENTITY_TOO_LARGE,
REQUEST_URI_TOO_LONG, UNSUPPORTED_MEDIA_TYPE,
REQUESTED_RANGE_NOT_SATISFIABLE, EXPECTATION_FAILED, INTERNAL_SERVER_ERROR,
NOT_IMPLEMENTED, BAD_GATEWAY, SERVICE_UNAVAILABLE, GATEWAY_TIMEOUT,
HTTP_VERSION_NOT_SUPPORTED, INSUFFICIENT_STORAGE_SPACE, NOT_EXTENDED,
RESPONSES,
)
from twisted.web.template import Element, flattenString, XMLString, renderer
# 300 - Should include entity with choices
# 301 -
# 304 - Must include Date, ETag, Content-Location, Expires, Cache-Control, Vary.
# 401 - Must include WWW-Authenticate.
# 405 - Must include Allow.
# 406 - Should include entity describing allowable characteristics
# 407 - Must include Proxy-Authenticate
# 413 - May include Retry-After
# 416 - Should include Content-Range
# 503 - Should include Retry-After
ERROR_MESSAGES = {
# 300
# no MULTIPLE_CHOICES
MOVED_PERMANENTLY:
'The document has permanently moved <a>here<t:attr name="href">'
'<t:slot name="location" /></t:attr></a>.',
FOUND:
'The document has temporarily moved <a>here<t:attr name="href">'
'<t:slot name="location" /></t:attr></a>.',
SEE_OTHER:
'The results are available <a>here<t:attr name="href">'
'<t:slot name="location" /></t:attr></a>.',
# no NOT_MODIFIED
USE_PROXY:
'Access to this resource must be through the proxy '
'<t:slot name="location" />.',
# 306 unused
TEMPORARY_REDIRECT:
'The document has temporarily moved <a><t:attr name="href">'
'<t:slot name="location" /></t:attr>here</a>.',
# 400
BAD_REQUEST:
'Your browser sent an invalid request.',
UNAUTHORIZED:
'You are not authorized to view the resource at <t:slot name="uri" />. '
"Perhaps you entered a wrong password, or perhaps your browser doesn't "
'support authentication.',
PAYMENT_REQUIRED:
'Payment Required (useful result code, this...).',
FORBIDDEN:
'You don\'t have permission to access <t:slot name="uri" />.',
NOT_FOUND:
'The resource <t:slot name="uri" /> cannot be found.',
NOT_ALLOWED:
'The requested method <t:slot name="method" /> is not supported by '
'<t:slot name="uri" />.',
NOT_ACCEPTABLE:
'No representation of <t:slot name="uri" /> that is acceptable to your '
'client could be found.',
PROXY_AUTH_REQUIRED:
'You are not authorized to view the resource at <t:slot name="uri" />. '
'Perhaps you entered a wrong password, or perhaps your browser doesn\'t '
'support authentication.',
REQUEST_TIMEOUT:
'Server timed out waiting for your client to finish sending the request.',
CONFLICT:
'Conflict (?)',
GONE:
'The resource <t:slot name="uri" /> has been permanently removed.',
LENGTH_REQUIRED:
'The resource <t:slot name="uri" /> requires a Content-Length header.',
PRECONDITION_FAILED:
'A precondition evaluated to false.',
REQUEST_ENTITY_TOO_LARGE:
'The provided request entity data is larger than the maximum for '
'the method <t:slot name="method" /> at <t:slot name="uri" />.',
REQUEST_URI_TOO_LONG:
'The request URL is longer than the maximum on this server.',
UNSUPPORTED_MEDIA_TYPE:
'The provided request data has a format not understood by the resource '
'at <t:slot name="uri" />.',
REQUESTED_RANGE_NOT_SATISFIABLE:
'None of the ranges given in the Range request header are satisfiable by '
'the resource <t:slot name="uri" />.',
EXPECTATION_FAILED:
'The server does not support one of the expectations given in the Expect '
'header.',
# 500
INTERNAL_SERVER_ERROR:
'An internal error occurred trying to process your request. Sorry.',
NOT_IMPLEMENTED:
'Some functionality requested is not implemented on this server.',
BAD_GATEWAY:
'An upstream server returned an invalid response.',
SERVICE_UNAVAILABLE:
'This server cannot service your request because it is overloaded.',
GATEWAY_TIMEOUT:
'An upstream server is not responding.',
HTTP_VERSION_NOT_SUPPORTED:
'HTTP Version not supported.',
INSUFFICIENT_STORAGE_SPACE:
'There is insufficient storage space available to perform that request.',
NOT_EXTENDED:
'This server does not support a mandatory extension requested.'
}
class DefaultErrorElement(Element):
"""
An L{ErrorElement} is an L{Element} that renders some HTML for the default
rendering of an error page.
"""
loader = XMLString("""
<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="error">
<head>
<title><t:slot name="code"/> <t:slot name="title"/></title>
</head>
<body>
<h1><t:slot name="title" /></h1>
<t:slot name="message" />
</body>
</html>
""")
def __init__(self, request, response):
super(DefaultErrorElement, self).__init__()
self.request = request
self.response = response
@renderer
def error(self, request, tag):
"""
Top-level renderer for page.
"""
return tag.fillSlots(
code=str(self.response.code),
title=RESPONSES.get(self.response.code),
message=self.loadMessage(self.response.code).fillSlots(
uri=self.request.uri,
location=self.response.headers.getHeader('location'),
method=self.request.method,
)
)
def loadMessage(self, code):
tag = XMLString(
('<t:transparent xmlns:t="http://twistedmatrix.com/'
'ns/twisted.web.template/0.1">') +
ERROR_MESSAGES.get(code, "") +
'</t:transparent>').load()[0]
return tag
def defaultErrorHandler(request, response):
"""
Handle errors which do not have any stream (i.e. output) associated with
them, so that users will see a nice message in their browser.
This is used as a response filter in L{txweb2.server.Request}.
"""
if response.stream is not None:
# Already got an error message
return response
if response.code < 300:
# We only do error messages
return response
message = ERROR_MESSAGES.get(response.code, None)
if message is None:
# No message specified for that code
return response
message = message % {
'uri': request.uri,
'location': response.headers.getHeader('location'),
'method': request.method,
}
data = []
error = []
(flattenString(request, DefaultErrorElement(request, response))
.addCallbacks(data.append, error.append))
# No deferreds from our renderers above, so this has always already fired.
if data:
subtype = 'html'
body = data[0]
else:
subtype = 'error'
body = 'Error in default error handler:\n' + error[0].getTraceback()
ctype = http_headers.MimeType('text', subtype, {'charset': 'utf-8'})
response.headers.setHeader("content-type", ctype)
response.stream = stream.MemoryStream(body)
return response
defaultErrorHandler.handleErrors = True
__all__ = ['defaultErrorHandler', ]
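# Rough usage sketch (hypothetical request/response objects; in practice this filter
# is installed by txweb2.server.Request rather than called by hand):
#   response = defaultErrorHandler(request, response)
#   # For a body-less error response (e.g. a 404), this sets a text/html content-type
#   # and attaches a MemoryStream containing the rendered DefaultErrorElement page.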
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None
def deepnn(x):
"""deepnn builds the graph for a deep net for classifying digits.
Args:
x: an input tensor with the dimensions (N_examples, 784), where 784 is the
number of pixels in a standard MNIST image.
Returns:
A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
equal to the logits of classifying the digit into one of 10 classes (the
digits 0-9). keep_prob is a scalar placeholder for the probability of
dropout.
"""
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with tf.name_scope('reshape'):
x_image = tf.reshape(x, [-1, 28, 28, 1])
# First convolutional layer - maps one grayscale image to 32 feature maps.
with tf.name_scope('conv1'):
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# Pooling layer - downsamples by 2X.
with tf.name_scope('pool1'):
h_pool1 = max_pool_2x2(h_conv1)
# Second convolutional layer -- maps 32 feature maps to 64.
with tf.name_scope('conv2'):
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# Second pooling layer.
with tf.name_scope('pool2'):
h_pool2 = max_pool_2x2(h_conv2)
# Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
with tf.name_scope('fc1'):
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout - controls the complexity of the model, prevents co-adaptation of
# features.
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Map the 1024 features to 10 classes, one for each digit
with tf.name_scope('fc2'):
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
return y_conv, keep_prob
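# Sketch of the interface described in the docstring above (this is what main() does;
# shown here only as commentary, not executed):
#   x = tf.placeholder(tf.float32, [None, 784])
#   logits, keep_prob = deepnn(x)
#   # logits has shape (N_examples, 10); feed keep_prob=1.0 for evaluation, <1.0 for training.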
def conv2d(x, W):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def main(_):
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
# Define loss and optimizer
y_ = tf.placeholder(tf.int64, [None])
# Build the graph for the deep net
y_conv, keep_prob = deepnn(x)
with tf.name_scope('loss'):
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
labels=y_, logits=y_conv)
cross_entropy = tf.reduce_mean(cross_entropy)
with tf.name_scope('adam_optimizer'):
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), y_)
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
graph_location = tempfile.mkdtemp()
print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
print('step %d, training accuracy %g' % (i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print('test accuracy %g' % accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str,
default='/tmp/tensorflow/mnist/input_data',
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
from django.db import models
from places.models import Place
class Beruf(models.Model):
name = models.CharField(blank=True, max_length=200, help_text="Beruf des Verstorbenen")
def __str__(self):
return self.name
class Todesart(models.Model):
name = models.CharField(max_length=200, blank=True, help_text="Art und Weise des Ablebens")
def __str__(self):
return self.name
class Person(models.Model):
kramer_index = models.CharField(
blank=True, unique=True, max_length=100,
help_text="ID, übernommen aus 'Kramer, Gefallene Tirols'")
name = models.CharField(max_length=200, blank=True, help_text="Nachname des Verstorbenen")
vorname = models.CharField(max_length=200, blank=True, help_text="Vorname des Verstorbenen")
alter = models.CharField(
blank=True, null=True, max_length=30, help_text="Alter zum Zeitpunkt des Todes")
beruf = models.ForeignKey(Beruf, null=True, blank=True, help_text="Beruf des Verstorbenen")
beruf_original = models.CharField(
max_length=500, blank=True, help_text="Beruf wie in der Quelle angeführt")
geburtsort = models.ForeignKey(
Place, null=True, blank=True, verbose_name="Geburts- und/oder Wohnort",
related_name="geburtsort")
sterbeort = models.ForeignKey(
Place, null=True, blank=True, verbose_name="Ort des Todes und/oder Ort der Beerdigung",
related_name="sterbeort")
todesart = models.ForeignKey(Todesart, null=True, blank=True)
todesart_original = models.CharField(
max_length=500, blank=True, help_text="Todesart wie in der Quelle angeführt")
todesjahr = models.IntegerField(null=True, blank=True)
todesmonat = models.IntegerField(null=True, blank=True)
todestag = models.IntegerField(null=True, blank=True)
todestag_original = models.CharField(
max_length=500, blank=True, help_text="Sterbedatum wie in der Quelle angeführt")
anmerkung = models.TextField(blank=True, help_text="Platz für Anmerkungen.")
quelle = models.TextField(
null=True, blank=True, help_text="Angabe einer alternativen Quelle.")
def __str__(self):
return "{}, {}".format(self.name, self.vorname)
|
import json
import html5lib
def parse(path="html5ents.xml"):
return html5lib.parse(open(path), treebuilder="lxml")
def entity_table(tree):
return {entity_name("".join(tr[0].xpath(".//text()"))):
entity_characters(tr[1].text)
for tr in tree.xpath("//h:tbody/h:tr",
namespaces={"h": "http://www.w3.org/1999/xhtml"})}
def entity_name(inp):
return inp.strip()
def entity_characters(inp):
return "".join(codepoint_to_character(item)
for item in inp.split()
if item)
def codepoint_to_character(inp):
return ("\\U000" + inp[2:]).decode("unicode-escape")
def make_tests_json(entities):
test_list = make_test_list(entities)
tests_json = {"tests":
[make_test(*item) for item in test_list]
}
return tests_json
def make_test(name, characters, good):
return {
"description": test_description(name, good),
"input": "&%s" % name,
"output": test_expected(name, characters, good)
}
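# Traced example of the structure built above (entity name chosen for illustration):
#   make_test("amp;", "&", True) ==
#   {"description": "Named entity: amp; with a semi-colon",
#    "input": "&amp;",
#    "output": [["Character", "&"]]}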
def test_description(name, good):
with_semicolon = name.endswith(";")
semicolon_text = {True: "with a semi-colon",
False: "without a semi-colon"}[with_semicolon]
if good:
text = "Named entity: %s %s" % (name, semicolon_text)
else:
text = "Bad named entity: %s %s" % (name, semicolon_text)
return text
def test_expected(name, characters, good):
rv = []
if not good or not name.endswith(";"):
rv.append("ParseError")
rv.append(["Character", characters])
return rv
def make_test_list(entities):
tests = []
for entity_name, characters in entities.items():
if entity_name.endswith(";") and not subentity_exists(entity_name, entities):
tests.append((entity_name[:-1], "&" + entity_name[:-1], False))
tests.append((entity_name, characters, True))
return sorted(tests)
def subentity_exists(entity_name, entities):
for i in range(1, len(entity_name)):
if entity_name[:-i] in entities:
return True
return False
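# For example, subentity_exists("notin;", entities) is True whenever a shorter
# prefix such as "not" is itself a key of the entities table.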
def make_entities_code(entities):
entities_text = "\n".join(" \"%s\": u\"%s\"," % (
name, entities[name].encode(
"unicode-escape").replace("\"", "\\\""))
for name in sorted(entities.keys()))
return """entities = {
%s
}""" % entities_text
def main():
entities = entity_table(parse())
tests_json = make_tests_json(entities)
json.dump(tests_json, open("namedEntities.test", "w"), indent=4)
code = make_entities_code(entities)
open("entities_constants.py", "w").write(code)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bcolz
import pandas as pd
import numpy as np
from . import risk_free_helper
class YieldCurveStore(object):
def __init__(self, f):
self._table = bcolz.open(f, 'r')
self._dates = self._table.cols['date'][:]
def get_yield_curve(self, start_date, end_date, tenor):
d1 = start_date.year * 10000 + start_date.month * 100 + start_date.day
d2 = end_date.year * 10000 + end_date.month * 100 + end_date.day
s = self._dates.searchsorted(d1)
e = self._dates.searchsorted(d2, side='right')
if e == len(self._dates):
e -= 1
if self._dates[e] == d2:
# include end_date
e += 1
if e < s:
return None
df = pd.DataFrame(self._table[s:e])
df.index = pd.Index(pd.Timestamp(str(d)) for d in df['date'])
del df['date']
df.rename(columns=lambda n: n[1:]+n[0], inplace=True)
if tenor is not None:
return df[tenor]
return df
def get_risk_free_rate(self, start_date, end_date):
tenor = risk_free_helper.get_tenor_for(start_date, end_date)
tenor = tenor[-1] + tenor[:-1]
d = start_date.year * 10000 + start_date.month * 100 + start_date.day
pos = self._dates.searchsorted(d)
if pos > 0 and (pos == len(self._dates) or self._dates[pos] != d):
pos -= 1
col = self._table.cols[tenor]
while pos >= 0 and np.isnan(col[pos]):
# data is missing ...
pos -= 1
return self._table.cols[tenor][pos]
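# Minimal usage sketch (path and column layout are illustrative; the bcolz table is
# assumed to have a 'date' column of YYYYMMDD integers plus one column per tenor):
#   store = YieldCurveStore('/path/to/yield_curve.bcolz')
#   curve = store.get_yield_curve(start_date, end_date, tenor=None)  # full DataFrame
#   rate = store.get_risk_free_rate(start_date, end_date)            # single value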
|
from os import path
from enigma import eDVBResourceManager, Misc_Options
from Tools.Directories import fileExists, fileCheck
from Tools.HardwareInfo import HardwareInfo
from boxbranding import getBoxType, getMachineBuild
SystemInfo = { }
#FIXMEE...
def getNumVideoDecoders():
idx = 0
while fileExists("/dev/dvb/adapter0/video%d"% idx, 'f'):
idx += 1
return idx
SystemInfo["NumVideoDecoders"] = getNumVideoDecoders()
SystemInfo["PIPAvailable"] = SystemInfo["NumVideoDecoders"] > 1
SystemInfo["CanMeasureFrontendInputPower"] = eDVBResourceManager.getInstance().canMeasureFrontendInputPower()
def countFrontpanelLEDs():
leds = 0
if fileExists("/proc/stb/fp/led_set_pattern"):
leds += 1
while fileExists("/proc/stb/fp/led%d_pattern" % leds):
leds += 1
return leds
SystemInfo["12V_Output"] = Misc_Options.getInstance().detected_12V_output()
SystemInfo["ZapMode"] = fileCheck("/proc/stb/video/zapmode") or fileCheck("/proc/stb/video/zapping_mode")
SystemInfo["NumFrontpanelLEDs"] = countFrontpanelLEDs()
SystemInfo["FrontpanelDisplay"] = fileExists("/dev/dbox/oled0") or fileExists("/dev/dbox/lcd0")
SystemInfo["OledDisplay"] = fileExists("/dev/dbox/oled0")
SystemInfo["LcdDisplay"] = fileExists("/dev/dbox/lcd0")
SystemInfo["FBLCDDisplay"] = fileCheck("/proc/stb/fb/sd_detach")
SystemInfo["DeepstandbySupport"] = HardwareInfo().has_deepstandby()
SystemInfo["Fan"] = fileCheck("/proc/stb/fp/fan")
SystemInfo["FanPWM"] = SystemInfo["Fan"] and fileCheck("/proc/stb/fp/fan_pwm")
SystemInfo["StandbyPowerLed"] = fileExists("/proc/stb/power/standbyled")
SystemInfo["LEDButtons"] = getBoxType() == 'vuultimo'
SystemInfo["WakeOnLAN"] = fileCheck("/proc/stb/power/wol") or fileCheck("/proc/stb/fp/wol")
SystemInfo["HDMICEC"] = (fileExists("/dev/hdmi_cec") or fileExists("/dev/misc/hdmi_cec0")) and fileExists("/usr/lib/enigma2/python/Plugins/SystemPlugins/HdmiCEC/plugin.pyo")
SystemInfo["SABSetup"] = fileExists("/usr/lib/enigma2/python/Plugins/SystemPlugins/SABnzbd/plugin.pyo")
SystemInfo["SeekStatePlay"] = False
SystemInfo["GraphicLCD"] = getBoxType() in ('vuultimo', 'xpeedlx3', 'et10000', 'mutant2400', 'quadbox2400', 'sezammarvel', 'atemionemesis', 'mbultra', 'beyonwizt4')
SystemInfo["Blindscan"] = fileExists("/usr/lib/enigma2/python/Plugins/SystemPlugins/Blindscan/plugin.pyo")
SystemInfo["Satfinder"] = fileExists("/usr/lib/enigma2/python/Plugins/SystemPlugins/Satfinder/plugin.pyo")
SystemInfo["HasExternalPIP"] = getMachineBuild() not in ('et9x00', 'et6x00', 'et5x00') and fileCheck("/proc/stb/vmpeg/1/external")
SystemInfo["hasPIPVisibleProc"] = fileCheck("/proc/stb/vmpeg/1/visible")
SystemInfo["VideoDestinationConfigurable"] = fileExists("/proc/stb/vmpeg/0/dst_left")
SystemInfo["GBWOL"] = fileExists("/usr/bin/gigablue_wol")
SystemInfo["LCDSKINSetup"] = path.exists("/usr/share/enigma2/display")
SystemInfo["VFD_scroll_repeats"] = fileCheck("/proc/stb/lcd/scroll_repeats")
SystemInfo["VFD_scroll_delay"] = fileCheck("/proc/stb/lcd/scroll_delay")
SystemInfo["VFD_initial_scroll_delay"] = fileCheck("/proc/stb/lcd/initial_scroll_delay")
SystemInfo["VFD_final_scroll_delay"] = fileCheck("/proc/stb/lcd/final_scroll_delay")
SystemInfo["LCDMiniTV"] = fileExists("/proc/stb/lcd/mode")
SystemInfo["LCDMiniTVPiP"] = SystemInfo["LCDMiniTV"] and getBoxType() != 'gb800ueplus'
SystemInfo["LcdLiveTV"] = fileCheck("/proc/stb/fb/sd_detach")
SystemInfo["CIHelper"] = fileExists("/usr/bin/cihelper")
SystemInfo["grautec"] = fileExists("/tmp/usbtft")
SystemInfo["3DMode"] = fileCheck("/proc/stb/fb/3dmode") or fileCheck("/proc/stb/fb/primary/3d")
SystemInfo["3DZNorm"] = fileCheck("/proc/stb/fb/znorm") or fileCheck("/proc/stb/fb/primary/zoffset")
SystemInfo["CanUse3DModeChoices"] = fileExists('/proc/stb/fb/3dmode_choices') and True or False
|
# -*- coding: utf-8 -*-
"""
Extract dependencies from a Semantic Dependency Parsing treebank.
Usage: extract_sdp.py [--sep sep] [--first_arg_col first_arg_col] IN_FILE OUT_TEXT_FILE OUT_HEAD_FILE OUT_DEPREL_FILE
Arguments:
IN_FILE SDP file in sdp format
OUT_TEXT_FILE File to write raw texts, one sentence per line
OUT_HEAD_FILE File to write heads, which are either an ID (1-indexed) or 0 (for no dependency)
If a word has more than one head, then its heads will be separated by --sep
OUT_DEPREL_FILE File to write UD relations to the head
If a word has more than one head, then its relations to the heads will be separated by --sep
Options:
-h, --help show this help message
--sep sep separator for multiple heads (Default: "|")
--first_arg_col first_arg_col first argument column id (0-indexed) (Default: 7)
"""
from docopt import docopt
import codecs
def run(sdp_file, out_text_file, out_head_file, out_deprel_file, sep, first_arg_col, encoding='UTF-8'):
with codecs.open(sdp_file, encoding=encoding) as f_sdp:
with codecs.open(out_text_file, 'w', encoding=encoding) as f_out_text:
with codecs.open(out_head_file, 'w', encoding=encoding) as f_out_head:
with codecs.open(out_deprel_file, 'w', encoding=encoding) as f_out_deprel:
words, rels, preds, pred_ids = [], [], [], []
tok_id = 0
for line in f_sdp:
#print line
if line.startswith('#'):
continue
if line.strip() == '':
# map pred order to id, then join multiple heads
heads = []
for cur_preds in preds:
if len(cur_preds) > 0:
heads.append(sep.join([str(pred_ids[cur_pred]) for cur_pred in cur_preds]))
else:
heads.append('0')
if len(words) > 0:
f_out_text.write(' '.join(words) + '\n')
f_out_deprel.write(' '.join(rels) + '\n')
f_out_head.write(' '.join(heads) + '\n')
words, rels, preds, pred_ids = [], [], [], []
tok_id = 0
continue
splt = line.strip().split('\t')
tok_id += 1
# is predicate
if splt[5] == '+':
pred_ids.append(tok_id)
words.append(splt[1])
cur_preds, cur_rels = [], []
# look for arguments
for i in xrange(first_arg_col, len(splt)):
# is argument
if splt[i] != '_':
# get the pred's order
cur_preds.append(i-first_arg_col)
cur_rels.append(splt[i])
preds.append(cur_preds)
if len(cur_rels) > 0:
rels.append(sep.join(cur_rels))
else:
rels.append('_')
if __name__ == '__main__':
args = docopt(__doc__)
sep = '|'
if args['--sep']:
sep = args['--sep']
first_arg_col = 7
if args['--first_arg_col']:
first_arg_col = args['--first_arg_col']
run(args['IN_FILE'], args['OUT_TEXT_FILE'], args['OUT_HEAD_FILE'], args['OUT_DEPREL_FILE'], sep=sep, first_arg_col=first_arg_col)
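# Typical invocation (file names are illustrative):
#   python extract_sdp.py --sep "|" --first_arg_col 7 in.sdp out.txt out.head out.deprel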
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Video module
"""
# video.py
# This file is part of limestonedb
#
# Copyright (C) 2014 - Enrico Polesel
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import lsutilities
debug = 5
def print_debug(level,msg):
if level <= debug:
print("[limestonedb film][debug] "+str(msg))
def get_substone(file_name):
try:
data = lsutilities.get_raw_media_informations(file_name)
except:
return {}
else:
output = {}
for inname, outname in [
("format_name", "format_name"), # matroska,webm
("duration","duration"), # 4515.712000
]:
if inname in data['format']:
output[outname] = data['format'][inname]
if 'tags' in data['format']:
for inname, outname in [
#("artist","artist"),
("title","title"),
#("genre","genre"),
]:
if inname in data['format']['tags']:
output[outname] = data['format']['tags'][inname]
output['video_stream'] = {}
output['audio_streams'] = []
output['subtitle_streams'] = []
for stream in data['streams']:
if stream['codec_type'] == 'video':
for inname, outname in [
('codec_name','codec'),
('width','width'),
('height','height'),
]:
if inname in stream:
output['video_stream'][outname] = stream[inname]
elif stream['codec_type'] == 'audio':
thisstream = {}
for inname, outname in [
('codec_name','codec'),
('channels','channels'),
('index','index'),
]:
if inname in stream:
thisstream[outname] = stream[inname]
if 'tags' in stream:
for inname, outname in [
('title','title'),
('language','language'),
]:
if inname in stream['tags']:
thisstream[outname] = stream['tags'][inname]
if thisstream != {}:
output['audio_streams'].append(thisstream)
elif stream['codec_type'] == 'subtitle':
thisstream = {}
for inname, outname in [
('codec_name','codec'),
('index','index'),
]:
if inname in stream:
thisstream[outname] = stream[inname]
if 'tags' in stream:
for inname, outname in [
('title','title'),
('language','language'),
]:
if inname in stream['tags']:
thisstream[outname] = stream['tags'][inname]
if thisstream != {}:
output['subtitle_streams'].append(thisstream)
return output
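# Illustrative shape of the returned dict (values hypothetical; the available keys
# depend on what lsutilities.get_raw_media_informations reports for the file):
#   {'format_name': 'matroska,webm', 'duration': '4515.712000', 'title': '...',
#    'video_stream': {'codec': 'h264', 'width': 1920, 'height': 1080},
#    'audio_streams': [{'codec': 'aac', 'channels': 2, 'index': 1, 'language': 'eng'}],
#    'subtitle_streams': [{'codec': 'subrip', 'index': 2, 'language': 'eng'}]}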
|
from lixian import XunleiClient
from lixian_commands.util import *
from lixian_cli_parser import *
from lixian_config import get_config
import lixian_help
from getpass import getpass
@command_line_parser(help=lixian_help.login)
@with_parser(parse_login)
@with_parser(parse_logging)
def login(args):
if args.cookies == '-':
args._args['cookies'] = None
if len(args) < 1:
args.username = args.username or XunleiClient(cookie_path=args.cookies, login=False).get_username() or get_config('username') or raw_input('ID: ')
args.password = args.password or get_config('password') or getpass('Password: ')
elif len(args) == 1:
args.username = args.username or XunleiClient(cookie_path=args.cookies, login=False).get_username() or get_config('username')
args.password = args[0]
if args.password == '-':
args.password = getpass('Password: ')
elif len(args) == 2:
args.username, args.password = list(args)
if args.password == '-':
args.password = getpass('Password: ')
elif len(args) == 3:
args.username, args.password, args.cookies = list(args)
if args.password == '-':
args.password = getpass('Password: ')
elif len(args) > 3:
raise RuntimeError('Too many arguments')
if not args.username:
raise RuntimeError("What's your name?")
if args.cookies:
print 'Saving login session to', args.cookies
else:
print 'Testing login without saving session'
import lixian_verification_code
verification_code_reader = lixian_verification_code.default_verification_code_reader(args)
XunleiClient(args.username, args.password, args.cookies, login=True, verification_code_reader=verification_code_reader)
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import os, csv, decimal
from django.core.management.base import NoArgsCommand
from locations.models import * #we are importing model classes from locations app
from ubuzima.models import *
from django.core import management
IMPORT_DIR = os.path.abspath("apps/ubuzima/import")
#the indexes in fosa_table.csv
PROVINCE_NAME = 5
PROVINCE_CODE = 4
NAME = 2
CODE = 0
TYPE = 3
LONGITUDE = 11
LATITUDE = 10
PARENT = 17
# indexes in fosa_villages.csv
VILLAGE_NAME = 1
VILLAGE_CODE = 0
VILLAGE_DISTRICT_CODE = 6
VILLAGE_SECTOR_CODE = 7
#indexes for District.csv
DISTRICT_NAME = 2
DISTRICT_CODE = 1
DISTRICT_PROVINCE_CODE = 3
#indexes for Sector.csv
SECTOR_CODE = 1
SECTOR_NAME = 2
SECTOR_DISTRICT_CODE = 5
class Command(NoArgsCommand):
def _csv(self, filename):
"""Returns a CSV reader for _filename_
relative to the sources directory."""
path = os.path.join(IMPORT_DIR, filename)
return csv.reader(open(path, "rU"))
def _hospital_name(self, original):
return original.capitalize()
def _loc_type (self, name):
return LocationType.objects.get(name__iexact=name)
def __init__(self):
self.provinces = {}
self.districts = {}
self.sectors = {}
self.hospitals = {}
self.healthcentres = {}
def handle_noargs(self, **options):
#delete all locations before insertion,
#we might change this during production
Location.objects.all().delete()
#load location_types fixture
management.call_command('loaddata', 'fosa_location_types.json')
# init our reporter groups
management.call_command('loaddata', 'groups.json')
# init our reporting objects
management.call_command('loaddata', 'reporting.json')
rows = list(self._csv("fosa_table.csv"))
rows = rows[1:]
for row in rows:
# ensure that the province exists
province, p_created = \
self._loc_type("province").locations.get_or_create(
name=row[PROVINCE_NAME].capitalize(),
code="0" + row[PROVINCE_CODE])
if p_created:
print ". Created Province: %s" % (province)
self.provinces[province.code] = province
district_rows = list(self._csv("District.csv"))
district_rows = district_rows[1:]
for row in district_rows:
# ensure that the district exists, and is
# linked to the province named on this row
district, d_created = \
self._loc_type("district").locations.get_or_create(
parent = self.provinces[row[DISTRICT_PROVINCE_CODE]],
#parent = province,
name=row[DISTRICT_NAME].capitalize(),
code=row[DISTRICT_CODE]
)
if d_created:
print ". Created District: %s" % (district)
self.districts[district.code] = district
sector_rows = list(self._csv("Sector.csv"))
sector_rows = sector_rows[1:]
for row in sector_rows:
# making sure the sector exist linked to the district
sector, s_created = \
self._loc_type("sector").locations.get_or_create(
parent = self.districts[row[SECTOR_DISTRICT_CODE]],
#parent = district,
name=row[SECTOR_NAME].capitalize(),
code=row[SECTOR_CODE]
)
if s_created:
print ". Created Sector: %s (%s)" % (sector, sector.code)
self.sectors[sector.code] = sector
# second iteration: create all of the hospitals. we must do
# this before the health centres, since many health centres
# link (by name) to the hospitals before they are listed
for row in rows:
if row[TYPE] == "HD" or row[TYPE] == "HM":
try:
# wooo geo co-ords!
lat = decimal.Decimal(row[LATITUDE])
lon = decimal.Decimal(row[LONGITUDE])
# django doesn't accept invalid decimals, so
# leave both fields null if they can't be cast
except decimal.InvalidOperation:
lat = lon = None
print "Adding Hopital: %s %s" % (row[NAME], row[CODE])
hospital, created = \
self._loc_type("hospital").locations.get_or_create(
#parent=self.sectors[row[SECTOR_CODE]],
parent = sector,
name=self._hospital_name(row[NAME]),
code=fosa_to_code(row[CODE]),
latitude=lat,
longitude=lon
)
if created:
print ". Created Hospital: %s" %\
(hospital)
self.hospitals[hospital.name] = hospital
# third iteration: create all of the remaining health
# centres, and link them back to the hospitals. this is
# very similar to above, and should probably be refactored
for row in rows:
if row[TYPE] == "CS":
# some locations are missing their
# government FOSA CODE. this just
# won't do, so skip it
if not row[CODE]:
print "! Health Centre missing FOSA code: %s" % (row[NAME])
continue
try:
# wooo geo co-ords!
lat = decimal.Decimal(row[LATITUDE])
lon = decimal.Decimal(row[LONGITUDE])
# django doesn't accept invalid decimals, so
# leave both fields null if they can't be cast
except decimal.InvalidOperation:
lat = lon = None
# resolve the hospital name into an object.
# if the parent was invalid, skip this location
try:
parent = self.hospitals[self._hospital_name(row[PARENT])]
except KeyError:
print "! Unable to find parent hospital for HC: %s" % row[CODE]
continue
healthcentre, created = \
self._loc_type("health centre").locations.get_or_create(
parent=parent,
name=row[NAME],
code=fosa_to_code(row[CODE]),
latitude=lat,
longitude=lon)
if created:
print ". Created Health Centre: %s" %\
(healthcentre)
village_rows = list(self._csv("fosa_villages.csv"))
village_rows = village_rows[1:]
for row in village_rows:
sectorCode = "0" + row[VILLAGE_SECTOR_CODE]
if sectorCode in self.sectors:
parent = self.sectors[sectorCode]
else:
print "Unable to find parent for village: %s (sector: %s)" % (row[VILLAGE_CODE], sectorCode)
continue
village, v_created= \
self._loc_type("village").locations.get_or_create(
parent=parent,
name=row[VILLAGE_NAME].capitalize(),
code=row[VILLAGE_CODE]
)
if v_created:
print ". Created Village: %s" % (village)
|
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <[email protected]>
# (c) 2016, Steve Baker <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_stack
short_description: Add/Remove Heat Stack
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Mathieu Bultel (matbu), Steve Baker (steveb)"
description:
- Add or remove a stack from an OpenStack Heat deployment
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
required: false
default: present
name:
description:
- Name of the stack that should be created; the name may contain letters and digits, but no spaces
required: true
template:
description:
- Path of the template file to use for the stack creation
required: false
default: None
environment:
description:
- List of environment files that should be used for the stack creation
required: false
default: None
parameters:
description:
- Dictionary of parameters for the stack creation
required: false
default: None
rollback:
description:
- Rollback stack creation
required: false
default: false
timeout:
description:
- Maximum number of seconds to wait for the stack creation
required: false
default: 3600
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
---
- name: create stack
ignore_errors: True
register: stack_create
os_stack:
name: "{{ stack_name }}"
state: present
template: "/path/to/my_stack.yaml"
environment:
- /path/to/resource-registry.yaml
- /path/to/environment.yaml
parameters:
bmc_flavor: m1.medium
bmc_image: CentOS
key_name: default
private_net: "{{ private_net_param }}"
node_count: 2
name: undercloud
image: CentOS
my_flavor: m1.large
external_net: "{{ external_net_param }}"
'''
RETURN = '''
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
stack:
action:
description: Action, could be Create or Update.
type: string
sample: "CREATE"
creation_time:
description: Time when the action has been made.
type: string
sample: "2016-07-05T17:38:12Z"
description:
description: Description of the Stack provided in the heat template.
type: string
sample: "HOT template to create a new instance and networks"
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
name:
description: Name of the Stack
type: string
sample: "test-stack"
identifier:
description: Identifier of the current Stack action.
type: string
sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
links:
description: Links to the current Stack.
type: list of dict
sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6']"
outputs:
description: Output returned by the Stack.
type: list of dict
sample: "{'description': 'IP address of server1 in private network',
'output_key': 'server1_private_ip',
'output_value': '10.1.10.103'}"
parameters:
description: Parameters of the current Stack
type: dict
sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
'OS::stack_name': 'test-stack',
'stack_status': 'CREATE_COMPLETE',
'stack_status_reason': 'Stack CREATE completed successfully',
'status': 'COMPLETE',
'template_description': 'HOT template to create a new instance and networks',
'timeout_mins': 60,
'updated_time': null}"
'''
from time import sleep
from distutils.version import StrictVersion
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _create_stack(module, stack, cloud):
try:
stack = cloud.create_stack(module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
wait=True,
rollback=module.params['rollback'],
**module.params['parameters'])
stack = cloud.get_stack(stack.id, None)
if stack.stack_status == 'CREATE_COMPLETE':
return stack
else:
module.fail_json(msg="Failure in creating stack: {0}".format(stack))
return False
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
def _update_stack(module, stack, cloud):
try:
stack = cloud.update_stack(
module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
rollback=module.params['rollback'],
wait=module.params['wait'],
**module.params['parameters'])
if stack['stack_status'] == 'UPDATE_COMPLETE':
return stack
else:
module.fail_json(msg = "Failure in updating stack: %s" %
stack['stack_status_reason'])
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
def _system_state_change(module, stack, cloud):
state = module.params['state']
if state == 'present':
if not stack:
return True
if state == 'absent' and stack:
return True
return False
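# For example, with state == 'present' and no existing stack this returns True (a
# create would be needed); with state == 'absent' and an existing stack it also
# returns True; in every other case it returns False (no change needed).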
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
template=dict(default=None),
environment=dict(default=None, type='list'),
parameters=dict(default={}, type='dict'),
rollback=dict(default=False, type='bool'),
timeout=dict(default=3600, type='int'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
# stack API introduced in 1.8.0
if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.8.0')):
module.fail_json(msg='shade 1.8.0 or higher is required for this module')
state = module.params['state']
name = module.params['name']
# Check for required parameters when state == 'present'
if state == 'present':
for p in ['template']:
if not module.params[p]:
module.fail_json(msg='%s required with present state' % p)
try:
cloud = shade.openstack_cloud(**module.params)
stack = cloud.get_stack(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, stack,
cloud))
if state == 'present':
if not stack:
stack = _create_stack(module, stack, cloud)
else:
stack = _update_stack(module, stack, cloud)
changed = True
module.exit_json(changed=changed,
stack=stack,
id=stack.id)
elif state == 'absent':
if not stack:
changed = False
else:
changed = True
if not cloud.delete_stack(name, wait=module.params['wait']):
module.fail_json(msg='delete stack failed for stack: %s' % name)
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
import re
from base64 import b64encode
import mimetypes
from django.utils import six
from django.template import (
Library, Node, VariableDoesNotExist, TemplateSyntaxError)
from django.utils.html import escape
from easy_thumbnails import utils
from easy_thumbnails.alias import aliases
from easy_thumbnails.conf import settings
from easy_thumbnails.files import get_thumbnailer
register = Library()
RE_SIZE = re.compile(r'(\d+)x(\d+)$')
VALID_OPTIONS = utils.valid_processor_options()
VALID_OPTIONS.remove('size')
VALID_OPTIONS.append('HIGH_RESOLUTION')
def split_args(args):
"""
Split a list of argument strings into a dictionary where each key is an
argument name.
An argument looks like ``crop``, ``crop="some option"`` or ``crop=my_var``.
Arguments which provide no value get a value of ``True``.
"""
args_dict = {}
for arg in args:
split_arg = arg.split('=', 1)
if len(split_arg) > 1:
value = split_arg[1]
else:
value = True
args_dict[split_arg[0]] = value
return args_dict
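# For example, split_args(['crop', 'quality=90']) returns {'crop': True, 'quality': '90'}.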
class ThumbnailNode(Node):
def __init__(self, source_var, opts, context_name=None):
self.source_var = source_var
self.opts = opts
self.context_name = context_name
def render(self, context):
# Note that this isn't a global constant because we need to change the
# value for tests.
raise_errors = settings.THUMBNAIL_DEBUG
# Get the source file.
try:
source = self.source_var.resolve(context)
except VariableDoesNotExist:
if raise_errors:
raise VariableDoesNotExist(
"Variable '%s' does not exist." % self.source_var)
return self.bail_out(context)
if not source:
if raise_errors:
raise TemplateSyntaxError(
"Variable '%s' is an invalid source." % self.source_var)
return self.bail_out(context)
# Resolve the thumbnail option values.
try:
opts = {}
for key, value in six.iteritems(self.opts):
if hasattr(value, 'resolve'):
value = value.resolve(context)
opts[str(key)] = value
except Exception:
if raise_errors:
raise
return self.bail_out(context)
# Size variable can be either a tuple/list of two integers or a
# valid string.
size = opts['size']
if isinstance(size, six.string_types):
m = RE_SIZE.match(size)
if m:
opts['size'] = (int(m.group(1)), int(m.group(2)))
else:
# Size variable may alternatively be referencing an alias.
alias = aliases.get(size, target=source)
if alias:
del opts['size']
opts = dict(alias, **opts)
else:
if raise_errors:
raise TemplateSyntaxError(
"%r is not a valid size." % size)
return self.bail_out(context)
# Ensure the quality is an integer.
if 'quality' in opts:
try:
opts['quality'] = int(opts['quality'])
except (TypeError, ValueError):
if raise_errors:
raise TemplateSyntaxError(
"%r is an invalid quality." % opts['quality'])
return self.bail_out(context)
# Ensure the subsampling level is an integer.
if 'subsampling' in opts:
try:
opts['subsampling'] = int(opts['subsampling'])
except (TypeError, ValueError):
if raise_errors:
raise TemplateSyntaxError(
"%r is an invalid subsampling level." %
opts['subsampling'])
return self.bail_out(context)
try:
thumbnail = get_thumbnailer(source).get_thumbnail(opts)
except Exception:
if raise_errors:
raise
return self.bail_out(context)
# Return the thumbnail file url, or put the file on the context.
if self.context_name is None:
return escape(thumbnail.url)
else:
context[self.context_name] = thumbnail
return ''
def bail_out(self, context):
if self.context_name:
context[self.context_name] = ''
return ''
@register.tag
def thumbnail(parser, token):
"""
Creates a thumbnail of an ImageField.
Basic tag Syntax::
{% thumbnail [source] [size] [options] %}
*source* must be a ``File`` object, usually an Image/FileField of a model
instance.
*size* can either be:
* the name of an alias
* the size in the format ``[width]x[height]`` (for example,
``{% thumbnail person.photo 100x50 %}``) or
* a variable containing a valid size (i.e. either a string in the
``[width]x[height]`` format or a tuple containing two integers):
``{% thumbnail person.photo size_var %}``.
*options* are a space separated list of options which are used when
processing the image to a thumbnail such as ``sharpen``, ``crop`` and
``quality=90``.
If *size* is specified as an alias name, *options* are used to override
and/or supplement the options defined in that alias.
The thumbnail tag can also place a
:class:`~easy_thumbnails.files.ThumbnailFile` object in the context,
providing access to the properties of the thumbnail such as the height and
width::
{% thumbnail [source] [size] [options] as [variable] %}
When ``as [variable]`` is used, the tag doesn't output anything. Instead,
use the variable like a standard ``ImageFieldFile`` object::
{% thumbnail obj.picture 200x200 upscale as thumb %}
<img href="{{ thumb.url }}"
width="{{ thumb.width }}"
height="{{ thumb.height }}" />
**Debugging**
By default, if there is an error creating the thumbnail or resolving the
image variable then the thumbnail tag will just return an empty string (and
if there was a context variable to be set then it will also be set to an
empty string).
For example, you will not see an error if the thumbnail could not
be written to the directory because of a permissions error. To display those
errors rather than failing silently, set ``THUMBNAIL_DEBUG = True`` in
your Django project's settings module.
"""
args = token.split_contents()
tag = args[0]
# Check to see if we're setting to a context variable.
if len(args) > 4 and args[-2] == 'as':
context_name = args[-1]
args = args[:-2]
else:
context_name = None
if len(args) < 3:
raise TemplateSyntaxError(
"Invalid syntax. Expected "
"'{%% %s source size [option1 option2 ...] %%}' or "
"'{%% %s source size [option1 option2 ...] as variable %%}'" %
(tag, tag))
opts = {}
# The first argument is the source file.
source_var = parser.compile_filter(args[1])
# The second argument is the requested size. If it's the static "10x10"
# format, wrap it in quotes so that it is compiled correctly.
size = args[2]
match = RE_SIZE.match(size)
if match:
size = '"%s"' % size
opts['size'] = parser.compile_filter(size)
# All further arguments are options.
args_list = split_args(args[3:]).items()
for arg, value in args_list:
if arg in VALID_OPTIONS:
if value and value is not True:
value = parser.compile_filter(value)
opts[arg] = value
else:
raise TemplateSyntaxError("'%s' tag received a bad argument: "
"'%s'" % (tag, arg))
return ThumbnailNode(source_var, opts=opts, context_name=context_name)
@register.filter
def thumbnailer(obj, relative_name=None):
"""
Creates a thumbnailer from an object (usually a ``FileField``).
Example usage::
{% with photo=person.photo|thumbnailer %}
{% if photo %}
<a href="{{ photo.large.url }}">
{{ photo.square.tag }}
</a>
{% else %}
<img src="{% static 'template/fallback.png' %}" alt="" />
{% endif %}
{% endwith %}
If you know what you're doing, you can also pass the relative name::
{% with photo=storage|thumbnailer:'some/file.jpg' %}...
"""
return get_thumbnailer(obj, relative_name=relative_name)
@register.filter
def thumbnailer_passive(obj):
"""
Creates a thumbnailer from an object (usually a ``FileField``) that won't
generate new thumbnails.
This is useful if you are using another process to generate the thumbnails
rather than having them generated on the fly if they are missing.
Example usage::
{% with avatar=person.avatar|thumbnailer_passive %}
{% with avatar_thumb=avatar.small %}
{% if avatar_thumb %}
<img src="{{ avatar_thumb.url }}" alt="" />
{% else %}
<img src="{% static 'img/default-avatar-small.png' %}"
alt="" />
{% endif %}
{% endwith %}
{% endwith %}
"""
thumbnailer = get_thumbnailer(obj)
thumbnailer.generate = False
return thumbnailer
@register.filter
def thumbnail_url(source, alias):
"""
Return the thumbnail url for a source file using an aliased set of
thumbnail options.
If no matching alias is found, returns an empty string.
Example usage::
<img src="{{ person.photo|thumbnail_url:'small' }}" alt="">
"""
try:
thumb = get_thumbnailer(source)[alias]
except Exception:
return ''
return thumb.url
@register.filter
def data_uri(thumbnail):
"""
This filter will return the base64 encoded data URI for a given thumbnail object.
Example usage::
{% thumbnail sample_image 25x25 crop as thumb %}
<img src="{{ thumb|data_uri }}">
will for instance be rendered as:
<img src="data:image/png;base64,iVBORw0KGgo...">
"""
try:
thumbnail.open('rb')
data = thumbnail.read()
finally:
thumbnail.close()
mime_type = mimetypes.guess_type(str(thumbnail.file))[0] or 'application/octet-stream'
data = b64encode(data).decode('utf-8')
return 'data:{0};base64,{1}'.format(mime_type, data)
|
#!/usr/bin/env python3
from subprocess import check_output,CalledProcessError
from re import search
from os import rename, unlink
from os.path import isfile, join as pjoin
args = [
r'-i audio.flac -vf 24000/1001 test.avs --test',
r'-i audio.flac -vf 24/1.001 test.avs --test',
r'-i audio.flac -v --ofps 24/1.001 test.avs --test',
r'-i audio.flac -vf tc1-cfr.txt test.avs --test',
r'-i audio.flac -vf tc1-vfr.txt test.avs --test',
r'-i audio.flac -vf tc2-cfr.txt test.avs --test',
r'-i audio.flac -vf tc2-vfr.txt test.avs --test',
r'-f 24/1.001 -c chap-fps-{}.txt -n chnames.txt test.avs',
r'-f tc1-cfr.txt -c chap-cfr-{}.txt -n chnames.txt test.avs',
r'-f 24/1.001 -c chap-fps-{}.xml -n chnames.txt --uid 123456 test.avs',
r'-f tc1-cfr.txt -c chap-cfr-{}.xml -t amkvc.mod.txt --uid 123456 test.avs'
]
stable = check_output('git tag',shell=True).decode()[:-1].split('\n')[-1]
current = search(r'(?m)^\* (\w+)',check_output("git branch",shell=True).decode()[:-1]).group(1)
check_output('git show {0}:vfr.py > vfr.py'.format(stable), shell=True)
check_output('git show {0}:templates.py > templates.py'.format(stable), shell=True)
try:
old = [check_output(r'python vfr.py ' + command.format('old'), shell=True) for command in args]
new = [check_output(r'python {0} {1}'.format(pjoin('..', 'vfr.py'), command.format('new')), shell=True) for command in args]
fails = []
for i in range(len(old)):
if old[i] != new[i]:
fails.append(args[i])
chapters = [[f.format('old'),f.format('new')] for f in ['chap-fps-{}.txt','chap-cfr-{}.txt','chap-fps-{}.xml','chap-cfr-{}.xml','chap-cfr-{}tags.xml','chap-cfr-{}.qpfile']]
for f in chapters:
with open(f[0],'rb') as oldf:
with open(f[1],'rb') as newf:
old = oldf.readlines()
new = newf.readlines()
if old != new:
fails.append('{0} and {1} are not identical.'.format(f[0],f[1]))
if len(fails) != 0:
print('Failed:')
[print(i) for i in fails]
else:
print('All tests passed.')
chapters.append(['vfr.py', 'templates.py'])  # also remove the copies of the stable scripts
[unlink(name) for files in chapters for name in files]
except CalledProcessError as e:
    print('Command failed:', e.cmd)
|
# http://www.onlamp.com/pub/a/python/2001/01/17/xmlrpcserver.html
import os
import re
import sys
import time
import socket
import signal
import random
import tempfile
import xmlrpclib
import threading
import subprocess
def _is_valid_ruby_class_identifer(ruby_class):
return bool(re.compile("([A-Za-z_]+(::)?)+").match(ruby_class))
def _random_ruby_context_address_indicator():
return "".join([random.choice("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") for x in range(50)])
class RubyContext(object):
def __init__(self, port=None, host="127.0.0.1", requires=None, setup=None, debug=False):
# set up internal state
self.__debug = debug
self.__xmlrpc_server_proc = None
self.__xmlrpc_client = None
self.__max_tries = 50
self.__allow_none = True
self.__ruby_context_address_indicator = _random_ruby_context_address_indicator()
self.__proxy_lookup = {}
# set up Python XMLRPC arguments
self.__python_verbose = False
# set up Ruby XMLRPC arguments
self.__ruby_port = port
self.__ruby_host = host
self.__ruby_max_connections = 4
self.__ruby_audit = True
if self.__debug:
self.__ruby_stdlog = "$stdout"
self.__ruby_audit = True
self.__ruby_debug = True
else:
self.__ruby_stdlog = "StringIO.new"
self.__ruby_debug = False
# set up additional Ruby arguments
self.__ruby_allow_nils = self.__allow_none
self.__ruby_requires = requires or []
self.__ruby_setup = setup or ""
# install signal handling
this = self
for unload_signal in [signal.SIGINT]:
original_sig_cb = signal.getsignal(unload_signal)
def new_sig_cb(*args, **kwargs):
this.unload()
original_sig_cb(*args, **kwargs)
signal.signal(unload_signal, new_sig_cb)
def load(self):
self.__ensure_started()
def unload(self):
if self.__xmlrpc_server_proc:
self.__xmlrpc_client.registry.shutdown()
os.waitpid(self.__xmlrpc_server_proc.pid, 0)
self.__xmlrpc_server_proc = None
self.__ruby_port = None
def reload(self):
self.unload()
self.load()
def get(self, ruby_class):
self.__ensure_started()
if not _is_valid_ruby_class_identifer(ruby_class=ruby_class):
raise ValueError("invalid Ruby class name: %r" % ruby_class)
ruby_context_address = self.__xmlrpc_client.registry.get_object(ruby_class)
return RubyProxy(context=self, ruby_context_address=ruby_context_address)
def module(self, ruby_module):
self.__ensure_started()
if not _is_valid_ruby_class_identifer(ruby_class=ruby_module):
    raise ValueError("invalid Ruby module name: %r" % ruby_module)
ruby_context_address = self.__xmlrpc_client.registry.get_object(ruby_module)
return RubyProxy(context=self, ruby_context_address=ruby_context_address)
def evaluate_on_instance(self, ruby_context_address, code):
self.__ensure_started()
value = self.__xmlrpc_client.registry.evaluate_on_instance(ruby_context_address, code)
return self.__transform_value(value)
def __call__(self, code):
self.__ensure_started()
value = self.__xmlrpc_client.registry.evaluate(code)
return self.__transform_value(value)
def __transform_value(self, value):
# check for special values, they come across the wire as lists
if isinstance(value, [].__class__) and len(value) == 3:
# this might be a Ruby context address
if value[0] == self.__ruby_context_address_indicator:
# it is a Ruby context address value
ruby_class = value[1]
ruby_context_address = value[2]
proxy = self.__lookup_proxy(ruby_context_address=ruby_context_address)
if proxy:
# we already had this proxy mapped
return proxy
else:
# this proxy was auto-generated in Ruby, wrap it in a RubyProxy
# TODO: choose the right Python class? this will require metaclasses
proxy = RubyProxy(
context=self,
ruby_context_address=ruby_context_address,
)
self.__track_proxy(proxy=proxy, ruby_context_address=ruby_context_address)
return proxy
# we never transformed it, just return the original value
return value
def __lookup_proxy(self, ruby_context_address):
return self.__proxy_lookup.get(ruby_context_address, None)
def __track_proxy(self, proxy, ruby_context_address):
self.__proxy_lookup[ruby_context_address] = proxy
def __choose_unused_port(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((self.__ruby_host, 0))
addr, port = s.getsockname()
s.close()
return port
def __ensure_started(self):
if not self.__xmlrpc_server_proc:
# choose a port
self.__ruby_port = self.__choose_unused_port()
# create a temporary file to store the script in
script = self.__create_script()
dontcare, filename = tempfile.mkstemp()
os.close(dontcare)  # close the low-level handle; the file is reopened by name below
fd = open(filename, "w")
try:
fd.write(script)
finally:
fd.close()
# build the subprocess arguments
args = ["ruby", "-W0", filename]
if self.__debug:
# debug mode, allow all server output to be displayed
print >> sys.stderr, "starting Ruby context on http://%s:%s/" % (self.__ruby_host, self.__ruby_port)
self.__xmlrpc_server_proc = subprocess.Popen(args=args)
else:
# not debug mode, hide all server output
if os.path.exists("nul:"):
stdout = open("nul:", "w")
stderr = open("nul:", "w")
elif os.path.exists("/dev/null"):
stdout = open("/dev/null", "w")
stderr = open("/dev/null", "w")
else:
stdout = None
stderr = subprocess.PIPE
self.__xmlrpc_server_proc= subprocess.Popen(
args=args,
stdout=stdout,
stderr=stderr,
close_fds=True,
bufsize=2,
)
# wait for the ruby server to start
socket_available = False
tries_remaining = self.__max_tries
while not socket_available and tries_remaining > 0:
try:
s = socket.socket()
s.connect((self.__ruby_host, self.__ruby_port))
s.close()
socket_available = True
except socket.error, e:
socket_available = False
time.sleep(0.1)
tries_remaining -= 1
# ruby server started, connect to it
# TODO: basic HTTP AUTH?
self.__xmlrpc_client = xmlrpclib.Server(
uri="http://%s:%s/" % (self.__ruby_host, self.__ruby_port),
verbose=self.__python_verbose,
allow_none=self.__allow_none,
)
def __create_script(self):
require_statements = "\n".join(["require %r" % rlib for rlib in self.__ruby_requires])
script = '''
require "xmlrpc/server"
require "xmlrpc/create"
%(require_statements)s
%(setup)s
module XMLRPC
class Create
def will_throw_serialization_exception(obj)
begin
conv2value(obj)
return false
rescue StandardError => e
return true
end
end
end
end
if %(allow_nils)s
XMLRPC::Config.const_set(:ENABLE_NIL_CREATE, true)
end
module Rython
class Registry
def initialize(server)
@server = server
@proxies = {}
end
def get_object(name)
obj = eval(name)
ruby_context_address = generate_ruby_context_address(obj)
add_proxy(ruby_context_address, obj)
ruby_context_address
end
def evaluate_on_instance(ruby_context_address, code)
obj = @proxies[ruby_context_address]
if obj
obj.instance_eval(code)
else
raise StandardError, "no object exists at '#{ruby_context_address}'"
end
end
def evaluate(code)
eval(code)
end
def generate_ruby_context_address(obj)
"ruby\##{obj.class.to_s}[#{obj.object_id}]"
end
def add_proxy(ruby_context_address, proxy)
@proxies[ruby_context_address] = proxy
end
def get_proxy(ruby_context_address)
@proxies[ruby_context_address]
end
def get_ruby_context_address(proxy)
@proxies.index(proxy)
end
def shutdown
@server.shutdown
end
end
def self.registry=(val)
@registry = val
end
def self.registry
@registry
end
server = XMLRPC::Server.new(%(port)s, '%(host)s', %(max_connections)s, %(stdlog)s, %(audit)s, %(debug)s)
server.add_introspection
self.registry = Registry.new(server)
server.add_handler("registry", self.registry)
# check for serialization errors
checker = XMLRPC::Create.new
server.set_service_hook do |obj, *args|
# call the method
retval = obj.call(*args)
# try to get the Ruby context address for this object
ruby_context_address = Rython::registry.get_ruby_context_address(retval)
if !ruby_context_address and checker.will_throw_serialization_exception(retval)
# automatically make it a RubyProxy in Python land
# TODO: if this is an Array, iterate through and add contexts for EACH
ruby_context_address = Rython::registry.generate_ruby_context_address(retval)
Rython::registry.add_proxy(ruby_context_address, retval)
end
if ruby_context_address
# this is a RubyProxy in Python land, refer to it
[%(ruby_context_address_indicator)r, "#{retval.class.to_s}", "#{ruby_context_address}"]
else
# this return value is fine, no need to wrap it up
retval
end
end
server.serve
end
''' % dict(
require_statements=require_statements,
setup=self.__ruby_setup,
port=self.__ruby_port,
host=self.__ruby_host,
max_connections=self.__ruby_max_connections,
stdlog=self.__ruby_stdlog,
audit=str(self.__ruby_audit).lower(), # True/False ==> true/false
debug=str(self.__ruby_debug).lower(), # True/False ==> true/false
allow_nils=str(self.__ruby_allow_nils).lower(), # True/False ==> true/false
ruby_context_address_indicator=self.__ruby_context_address_indicator,
)
return script
class RubyProxy(object):
ruby_context_address = property(lambda self: self.__ruby_context_address)
def __init__(self, context, ruby_context_address):
self.__context = context
self.__ruby_context_address = ruby_context_address
def __call__(self, code, *args, **kwargs):
if args and kwargs:
raise ValueError("cannot mix positional arguments with keyword arguments when calling into the Ruby context")
if kwargs:
transformed_kwargs = {}
for k,v in kwargs.iteritems():
transformed_kwargs[k] = self.__transform_argument(v)
substituted_code = code % transformed_kwargs
else:
substituted_code = code % tuple([self.__transform_argument(a) for a in args])
return self.__context.evaluate_on_instance(ruby_context_address=self.__ruby_context_address, code=substituted_code)
def __getattr__(self, name):
method_name = name
context = self.__context
transform_argument = self.__transform_argument
# create a method proxy
def method_proxy(*args):
# transform all the arguments to things that xmlrpc
# with the Rython module will understand
transformed_args = [transform_argument(a) for a in args]
# generate the code that executes this method
code = "%(method_name)s(%(arguments)s)" % dict(
method_name=method_name,
arguments=", ".join([repr(a) for a in transformed_args]),
)
# evaluate this method in the context
return context.evaluate_on_instance(ruby_context_address=self.__ruby_context_address, code=code)
# return our method proxy
return method_proxy
def __transform_argument(self, arg):
"""outputs a representation of the object that can be
interpreted in the Ruby context"""
if hasattr(arg, "ruby_context_address"):
class RubyExpression(object):
def __repr__(self):
return "Rython::registry.get_proxy(%r)" % arg.ruby_context_address
return RubyExpression()
elif isinstance(arg, bool):
class RubyBool(object):
def __repr__(self):
return "true" if arg else "false"
return RubyBool()
elif arg is None:
class RubyNil(object):
def __repr__(self):
return "nil"
return RubyNil()
else:
# http://www.tldp.org/HOWTO/XML-RPC-HOWTO/xmlrpc-howto-intro.html#xmlrpc-howto-types
# TODO: complex array types (use xmlrpc.dumps)
# TODO: struct types (use xmlrpc.dumps)
# TODO: datetime types (DateTime.from_timestamp(<timestamp in unix time>))
# TODO: error out on non-basic Python objects that don't have a ruby_context_address
return arg
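# A hypothetical usage sketch (the Ruby class and require names below are
# illustrative, not part of this module). A RubyContext starts the Ruby
# XMLRPC bridge lazily on first use; objects that cannot be serialized come
# back as RubyProxy instances whose method calls are evaluated in Ruby:
#
#     context = RubyContext(requires=["date"])
#     date_class = context.get("Date")        # RubyProxy around the Date class
#     d = date_class.parse("2010-01-02")      # runs Date.parse in the Ruby process
#     iso = d("to_s")                         # evaluate code on the proxied instance
#     context.unload()                        # shut the Ruby server down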
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.enums",
marshal="google.ads.googleads.v6",
manifest={"LocationExtensionTargetingCriterionFieldEnum",},
)
class LocationExtensionTargetingCriterionFieldEnum(proto.Message):
r"""Values for Location Extension Targeting criterion fields."""
class LocationExtensionTargetingCriterionField(proto.Enum):
r"""Possible values for Location Extension Targeting criterion
fields.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ADDRESS_LINE_1 = 2
ADDRESS_LINE_2 = 3
CITY = 4
PROVINCE = 5
POSTAL_CODE = 6
COUNTRY_CODE = 7
__all__ = tuple(sorted(__protobuf__.manifest))
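# Illustrative usage sketch (not part of the generated code): proto-plus
# exposes the nested enum as a standard Python enum, so values can be
# referenced and compared like this:
#
#     field_enum = LocationExtensionTargetingCriterionFieldEnum
#     city = field_enum.LocationExtensionTargetingCriterionField.CITY
#     assert city == 4 and city.name == 'CITY'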
|
"""Header value parser implementing various email-related RFC parsing rules.
The parsing methods defined in this module implement various email related
parsing rules. Principal among them is RFC 5322, which is the follow-on
to RFC 2822 and primarily a clarification of the former. It also implements
RFC 2047 encoded word decoding.
RFC 5322 goes to considerable trouble to maintain backward compatibility with
RFC 822 in the parse phase, while cleaning up the structure on the generation
phase. This parser supports correct RFC 5322 generation by tagging white space
as folding white space only when folding is allowed in the non-obsolete rule
sets. Actually, the parser is even more generous when accepting input than RFC
5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages.
Where possible deviations from the standard are annotated on the 'defects'
attribute of tokens that deviate.
The general structure of the parser follows RFC 5322, and uses its terminology
where there is a direct correspondence. Where the implementation requires a
somewhat different structure than that used by the formal grammar, new terms
that mimic the closest existing terms are used. Thus, it really helps to have
a copy of RFC 5322 handy when studying this code.
Input to the parser is a string that has already been unfolded according to
RFC 5322 rules. According to the RFC this unfolding is the very first step, and
this parser leaves the unfolding step to a higher level message parser, which
will have already detected the line breaks that need unfolding while
determining the beginning and end of each header.
The output of the parser is a TokenList object, which is a list subclass. A
TokenList is a recursive data structure. The terminal nodes of the structure
are Terminal objects, which are subclasses of str. These do not correspond
directly to terminal objects in the formal grammar, but are instead more
practical higher level combinations of true terminals.
All TokenList and Terminal objects have a 'value' attribute, which produces the
semantically meaningful value of that part of the parse subtree. The value of
all whitespace tokens (no matter how many sub-tokens they may contain) is a
single space, as per the RFC rules. This includes 'CFWS', which is herein
included in the general class of whitespace tokens. There is one exception to
the rule that whitespace tokens are collapsed into single spaces in values: in
the value of a 'bare-quoted-string' (a quoted-string with no leading or
trailing whitespace), any whitespace that appeared between the quotation marks
is preserved in the returned value. Note that in all Terminal strings quoted
pairs are turned into their unquoted values.
All TokenList and Terminal objects also have a string value, which attempts to
be a "canonical" representation of the RFC-compliant form of the substring that
produced the parsed subtree, including minimal use of quoted pair quoting.
Whitespace runs are not collapsed.
Comment tokens also have a 'content' attribute providing the string found
between the parens (including any nested comments) with whitespace preserved.
All TokenList and Terminal objects have a 'defects' attribute which is a
possibly empty list of all the defects found while creating the token. Defects
may appear on any token in the tree, and a composite list of all defects in the
subtree is available through the 'all_defects' attribute of any node. (For
Terminal nodes x.defects == x.all_defects.)
Each object in a parse tree is called a 'token', and each has a 'token_type'
attribute that gives the name from the RFC 5322 grammar that it represents.
Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that
may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters.
It is returned in place of lists of (ctext/quoted-pair) and
(qtext/quoted-pair).
XXX: provide complete list of token types.
"""
import re
import urllib # For urllib.parse.unquote
from string import hexdigits
from collections import namedtuple, OrderedDict
from email import _encoded_words as _ew
from email import errors
from email import utils
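# An illustrative sketch (not a doctest) of the parse API described in the
# module docstring above, using get_quoted_string, which is defined further
# down in this module:
#
#     token, rest = get_quoted_string(' "Dinsdale Piranha" rest')
#     token.token_type   # 'quoted-string'
#     token.content      # 'Dinsdale Piranha'
#     token.value        # ' Dinsdale Piranha ' (surrounding CFWS becomes ' ')
#     rest               # 'rest' -- the unparsed remainder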
#
# Useful constants and functions
#
WSP = set(' \t')
CFWS_LEADER = WSP | set('(')
SPECIALS = set(r'()<>@,:;.\"[]')
ATOM_ENDS = SPECIALS | WSP
DOT_ATOM_ENDS = ATOM_ENDS - set('.')
# '.', '"', and '(' do not end phrases in order to support obs-phrase
PHRASE_ENDS = SPECIALS - set('."(')
TSPECIALS = (SPECIALS | set('/?=')) - set('.')
TOKEN_ENDS = TSPECIALS | WSP
ASPECIALS = TSPECIALS | set("*'%")
ATTRIBUTE_ENDS = ASPECIALS | WSP
EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
def quote_string(value):
return '"'+str(value).replace('\\', '\\\\').replace('"', r'\"')+'"'
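# For example (illustrative), quote_string('Bob "Smith"') yields the
# serialized form "Bob \"Smith\"": surrounding DQUOTEs are added and any
# backslash or DQUOTE inside the value is backslash-escaped.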
#
# Accumulator for header folding
#
class _Folded:
def __init__(self, maxlen, policy):
self.maxlen = maxlen
self.policy = policy
self.lastlen = 0
self.stickyspace = None
self.firstline = True
self.done = []
self.current = []
def newline(self):
self.done.extend(self.current)
self.done.append(self.policy.linesep)
self.current.clear()
self.lastlen = 0
def finalize(self):
if self.current:
self.newline()
def __str__(self):
return ''.join(self.done)
def append(self, stoken):
self.current.append(stoken)
def append_if_fits(self, token, stoken=None):
if stoken is None:
stoken = str(token)
l = len(stoken)
if self.stickyspace is not None:
stickyspace_len = len(self.stickyspace)
if self.lastlen + stickyspace_len + l <= self.maxlen:
self.current.append(self.stickyspace)
self.lastlen += stickyspace_len
self.current.append(stoken)
self.lastlen += l
self.stickyspace = None
self.firstline = False
return True
if token.has_fws:
ws = token.pop_leading_fws()
if ws is not None:
self.stickyspace += str(ws)
stickyspace_len += len(ws)
token._fold(self)
return True
if stickyspace_len and l + 1 <= self.maxlen:
margin = self.maxlen - l
if 0 < margin < stickyspace_len:
trim = stickyspace_len - margin
self.current.append(self.stickyspace[:trim])
self.stickyspace = self.stickyspace[trim:]
stickyspace_len = trim
self.newline()
self.current.append(self.stickyspace)
self.current.append(stoken)
self.lastlen = l + stickyspace_len
self.stickyspace = None
self.firstline = False
return True
if not self.firstline:
self.newline()
self.current.append(self.stickyspace)
self.current.append(stoken)
self.stickyspace = None
self.firstline = False
return True
if self.lastlen + l <= self.maxlen:
self.current.append(stoken)
self.lastlen += l
return True
if l < self.maxlen:
self.newline()
self.current.append(stoken)
self.lastlen = l
return True
return False
#
# TokenList and its subclasses
#
class TokenList(list):
token_type = None
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.defects = []
def __str__(self):
return ''.join(str(x) for x in self)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__,
super().__repr__())
@property
def value(self):
return ''.join(x.value for x in self if x.value)
@property
def all_defects(self):
return sum((x.all_defects for x in self), self.defects)
#
# Folding API
#
# parts():
#
# return a list of objects that constitute the "higher level syntactic
# objects" specified by the RFC as the best places to fold a header line.
# The returned objects must include leading folding white space, even if
# this means mutating the underlying parse tree of the object. Each object
# is only responsible for returning *its* parts, and should not drill down
# to any lower level except as required to meet the leading folding white
# space constraint.
#
# _fold(folded):
#
# folded: the result accumulator. This is an instance of _Folded.
# (XXX: I haven't finished factoring this out yet, the folding code
# pretty much uses this as a state object.) When the folded.current
# contains as much text as will fit, the _fold method should call
# folded.newline.
# folded.lastlen: the current length of the text stored in folded.current.
# folded.maxlen: The maximum number of characters that may appear on a
# folded line. Differs from the policy setting in that "no limit" is
# represented by +inf, which means it can be used in the trivially
# logical fashion in comparisons.
#
# Currently no subclasses implement parts, and I think this will remain
# true. A subclass only needs to implement _fold when the generic version
# isn't sufficient. _fold will need to be implemented primarily when it is
# possible for encoded words to appear in the specialized token-list, since
# there is no generic algorithm that can know where exactly the encoded
# words are allowed. A _fold implementation is responsible for filling
# lines in the same general way that the top level _fold does. It may, and
# should, call the _fold method of sub-objects in a similar fashion to that
# of the top level _fold.
#
# XXX: I'm hoping it will be possible to factor the existing code further
# to reduce redundancy and make the logic clearer.
@property
def parts(self):
klass = self.__class__
this = []
for token in self:
if token.startswith_fws():
if this:
yield this[0] if len(this)==1 else klass(this)
this.clear()
end_ws = token.pop_trailing_ws()
this.append(token)
if end_ws:
yield klass(this)
this = [end_ws]
if this:
yield this[0] if len(this)==1 else klass(this)
def startswith_fws(self):
return self[0].startswith_fws()
def pop_leading_fws(self):
if self[0].token_type == 'fws':
return self.pop(0)
return self[0].pop_leading_fws()
def pop_trailing_ws(self):
if self[-1].token_type == 'cfws':
return self.pop(-1)
return self[-1].pop_trailing_ws()
@property
def has_fws(self):
for part in self:
if part.has_fws:
return True
return False
def has_leading_comment(self):
return self[0].has_leading_comment()
@property
def comments(self):
comments = []
for token in self:
comments.extend(token.comments)
return comments
def fold(self, *, policy):
# max_line_length 0/None means no limit, ie: infinitely long.
maxlen = policy.max_line_length or float("+inf")
folded = _Folded(maxlen, policy)
self._fold(folded)
folded.finalize()
return str(folded)
def as_encoded_word(self, charset):
# This works only for things returned by 'parts', which include
# the leading fws, if any, that should be used.
res = []
ws = self.pop_leading_fws()
if ws:
res.append(ws)
trailer = self.pop(-1) if self[-1].token_type=='fws' else ''
res.append(_ew.encode(str(self), charset))
res.append(trailer)
return ''.join(res)
def cte_encode(self, charset, policy):
res = []
for part in self:
res.append(part.cte_encode(charset, policy))
return ''.join(res)
def _fold(self, folded):
for part in self.parts:
tstr = str(part)
tlen = len(tstr)
try:
str(part).encode('us-ascii')
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
# XXX: this should be a policy setting
charset = 'utf-8'
tstr = part.cte_encode(charset, folded.policy)
tlen = len(tstr)
if folded.append_if_fits(part, tstr):
continue
# Peel off the leading whitespace if any and make it sticky, to
# avoid infinite recursion.
ws = part.pop_leading_fws()
if ws is not None:
# Peel off the leading whitespace and make it sticky, to
# avoid infinite recursion.
folded.stickyspace = str(ws)
if folded.append_if_fits(part):
continue
if part.has_fws:
part._fold(folded)
continue
# There are no fold points in this one; it is too long for a single
# line and can't be split...we just have to put it on its own line.
folded.append(tstr)
folded.newline()
def pprint(self, indent=''):
print('\n'.join(self._pp(indent='')))
def ppstr(self, indent=''):
return '\n'.join(self._pp(indent=''))
def _pp(self, indent=''):
yield '{}{}/{}('.format(
indent,
self.__class__.__name__,
self.token_type)
for token in self:
if not hasattr(token, '_pp'):
yield (indent + ' !! invalid element in token '
'list: {!r}'.format(token))
else:
yield from token._pp(indent+' ')
if self.defects:
extra = ' Defects: {}'.format(self.defects)
else:
extra = ''
yield '{}){}'.format(indent, extra)
class WhiteSpaceTokenList(TokenList):
@property
def value(self):
return ' '
@property
def comments(self):
return [x.content for x in self if x.token_type=='comment']
class UnstructuredTokenList(TokenList):
token_type = 'unstructured'
def _fold(self, folded):
last_ew = None
for part in self.parts:
tstr = str(part)
is_ew = False
try:
str(part).encode('us-ascii')
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
charset = 'utf-8'
if last_ew is not None:
# We've already done an EW, combine this one with it
# if there's room.
chunk = get_unstructured(
''.join(folded.current[last_ew:]+[tstr])).as_encoded_word(charset)
oldlastlen = sum(len(x) for x in folded.current[:last_ew])
schunk = str(chunk)
lchunk = len(schunk)
if oldlastlen + lchunk <= folded.maxlen:
del folded.current[last_ew:]
folded.append(schunk)
folded.lastlen = oldlastlen + lchunk
continue
tstr = part.as_encoded_word(charset)
is_ew = True
if folded.append_if_fits(part, tstr):
if is_ew:
last_ew = len(folded.current) - 1
continue
if is_ew or last_ew:
# It's too big to fit on the line, but since we've
# got encoded words we can use encoded word folding.
part._fold_as_ew(folded)
continue
# Peel off the leading whitespace if any and make it sticky, to
# avoid infinite recursion.
ws = part.pop_leading_fws()
if ws is not None:
folded.stickyspace = str(ws)
if folded.append_if_fits(part):
continue
if part.has_fws:
part._fold(folded)
continue
# It can't be split...we just have to put it on its own line.
folded.append(tstr)
folded.newline()
last_ew = None
def cte_encode(self, charset, policy):
res = []
last_ew = None
for part in self:
spart = str(part)
try:
spart.encode('us-ascii')
res.append(spart)
except UnicodeEncodeError:
if last_ew is None:
res.append(part.cte_encode(charset, policy))
last_ew = len(res)
else:
tl = get_unstructured(''.join(res[last_ew:] + [spart]))
res.append(tl.as_encoded_word(charset))
return ''.join(res)
class Phrase(TokenList):
token_type = 'phrase'
def _fold(self, folded):
# As with Unstructured, we can have pure ASCII with or without
# surrogateescape encoded bytes, or we could have unicode. But this
# case is more complicated, since we have to deal with the various
# sub-token types and how they can be composed in the face of
# unicode-that-needs-CTE-encoding, and the fact that if a token contains a
# comment, that comment becomes a barrier across which we can't compose encoded
# words.
last_ew = None
for part in self.parts:
tstr = str(part)
tlen = len(tstr)
has_ew = False
try:
str(part).encode('us-ascii')
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
charset = 'utf-8'
if last_ew is not None and not part.has_leading_comment():
# We've already done an EW, let's see if we can combine
# this one with it. The last_ew logic ensures that all we
# have at this point is atoms, no comments or quoted
# strings. So we can treat the text between the last
# encoded word and the content of this token as
# unstructured text, and things will work correctly. But
# we have to strip off any trailing comment on this token
# first, and if it is a quoted string we have to pull out
# the content (we're encoding it, so it no longer needs to
# be quoted).
if part[-1].token_type == 'cfws' and part.comments:
remainder = part.pop(-1)
else:
remainder = ''
for i, token in enumerate(part):
if token.token_type == 'bare-quoted-string':
part[i] = UnstructuredTokenList(token[:])
chunk = get_unstructured(
''.join(folded.current[last_ew:]+[tstr])).as_encoded_word(charset)
schunk = str(chunk)
lchunk = len(schunk)
if last_ew + lchunk <= folded.maxlen:
del folded.current[last_ew:]
folded.append(schunk)
folded.lastlen = sum(len(x) for x in folded.current)
continue
tstr = part.as_encoded_word(charset)
tlen = len(tstr)
has_ew = True
if folded.append_if_fits(part, tstr):
if has_ew and not part.comments:
last_ew = len(folded.current) - 1
elif part.comments or part.token_type == 'quoted-string':
# If a comment is involved we can't combine EWs. And if a
# quoted string is involved, it's not worth the effort to
# try to combine them.
last_ew = None
continue
part._fold(folded)
def cte_encode(self, charset, policy):
res = []
last_ew = None
is_ew = False
for part in self:
spart = str(part)
try:
spart.encode('us-ascii')
res.append(spart)
except UnicodeEncodeError:
is_ew = True
if last_ew is None:
if not part.comments:
last_ew = len(res)
res.append(part.cte_encode(charset, policy))
elif not part.has_leading_comment():
if part[-1].token_type == 'cfws' and part.comments:
remainder = part.pop(-1)
else:
remainder = ''
for i, token in enumerate(part):
if token.token_type == 'bare-quoted-string':
part[i] = UnstructuredTokenList(token[:])
tl = get_unstructured(''.join(res[last_ew:] + [spart]))
res[last_ew:] = [tl.as_encoded_word(charset)]
if part.comments or (not is_ew and part.token_type == 'quoted-string'):
last_ew = None
return ''.join(res)
class Word(TokenList):
token_type = 'word'
class CFWSList(WhiteSpaceTokenList):
token_type = 'cfws'
def has_leading_comment(self):
return bool(self.comments)
class Atom(TokenList):
token_type = 'atom'
class Token(TokenList):
token_type = 'token'
class EncodedWord(TokenList):
token_type = 'encoded-word'
cte = None
charset = None
lang = None
@property
def encoded(self):
if self.cte is not None:
return self.cte
return _ew.encode(str(self), self.charset)
class QuotedString(TokenList):
token_type = 'quoted-string'
@property
def content(self):
for x in self:
if x.token_type == 'bare-quoted-string':
return x.value
@property
def quoted_value(self):
res = []
for x in self:
if x.token_type == 'bare-quoted-string':
res.append(str(x))
else:
res.append(x.value)
return ''.join(res)
@property
def stripped_value(self):
for token in self:
if token.token_type == 'bare-quoted-string':
return token.value
class BareQuotedString(QuotedString):
token_type = 'bare-quoted-string'
def __str__(self):
return quote_string(''.join(str(x) for x in self))
@property
def value(self):
return ''.join(str(x) for x in self)
class Comment(WhiteSpaceTokenList):
token_type = 'comment'
def __str__(self):
return ''.join(sum([
["("],
[self.quote(x) for x in self],
[")"],
], []))
def quote(self, value):
if value.token_type == 'comment':
return str(value)
return str(value).replace('\\', '\\\\').replace(
'(', r'\(').replace(
')', r'\)')
@property
def content(self):
return ''.join(str(x) for x in self)
@property
def comments(self):
return [self.content]
class AddressList(TokenList):
token_type = 'address-list'
@property
def addresses(self):
return [x for x in self if x.token_type=='address']
@property
def mailboxes(self):
return sum((x.mailboxes
for x in self if x.token_type=='address'), [])
@property
def all_mailboxes(self):
return sum((x.all_mailboxes
for x in self if x.token_type=='address'), [])
class Address(TokenList):
token_type = 'address'
@property
def display_name(self):
if self[0].token_type == 'group':
return self[0].display_name
@property
def mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return [self[0]]
return self[0].all_mailboxes
class MailboxList(TokenList):
token_type = 'mailbox-list'
@property
def mailboxes(self):
return [x for x in self if x.token_type=='mailbox']
@property
def all_mailboxes(self):
return [x for x in self
if x.token_type in ('mailbox', 'invalid-mailbox')]
class GroupList(TokenList):
token_type = 'group-list'
@property
def mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].all_mailboxes
class Group(TokenList):
token_type = "group"
@property
def mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].mailboxes
@property
def all_mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].all_mailboxes
@property
def display_name(self):
return self[0].display_name
class NameAddr(TokenList):
token_type = 'name-addr'
@property
def display_name(self):
if len(self) == 1:
return None
return self[0].display_name
@property
def local_part(self):
return self[-1].local_part
@property
def domain(self):
return self[-1].domain
@property
def route(self):
return self[-1].route
@property
def addr_spec(self):
return self[-1].addr_spec
class AngleAddr(TokenList):
token_type = 'angle-addr'
@property
def local_part(self):
for x in self:
if x.token_type == 'addr-spec':
return x.local_part
@property
def domain(self):
for x in self:
if x.token_type == 'addr-spec':
return x.domain
@property
def route(self):
for x in self:
if x.token_type == 'obs-route':
return x.domains
@property
def addr_spec(self):
for x in self:
if x.token_type == 'addr-spec':
return x.addr_spec
else:
return '<>'
class ObsRoute(TokenList):
token_type = 'obs-route'
@property
def domains(self):
return [x.domain for x in self if x.token_type == 'domain']
class Mailbox(TokenList):
token_type = 'mailbox'
@property
def display_name(self):
if self[0].token_type == 'name-addr':
return self[0].display_name
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
return self[0].domain
@property
def route(self):
if self[0].token_type == 'name-addr':
return self[0].route
@property
def addr_spec(self):
return self[0].addr_spec
class InvalidMailbox(TokenList):
token_type = 'invalid-mailbox'
@property
def display_name(self):
return None
local_part = domain = route = addr_spec = display_name
class Domain(TokenList):
token_type = 'domain'
@property
def domain(self):
return ''.join(super().value.split())
class DotAtom(TokenList):
token_type = 'dot-atom'
class DotAtomText(TokenList):
token_type = 'dot-atom-text'
class AddrSpec(TokenList):
token_type = 'addr-spec'
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
if len(self) < 3:
return None
return self[-1].domain
@property
def value(self):
if len(self) < 3:
return self[0].value
return self[0].value.rstrip()+self[1].value+self[2].value.lstrip()
@property
def addr_spec(self):
nameset = set(self.local_part)
if len(nameset) > len(nameset-DOT_ATOM_ENDS):
lp = quote_string(self.local_part)
else:
lp = self.local_part
if self.domain is not None:
return lp + '@' + self.domain
return lp
class ObsLocalPart(TokenList):
token_type = 'obs-local-part'
class DisplayName(Phrase):
token_type = 'display-name'
@property
def display_name(self):
res = TokenList(self)
if res[0].token_type == 'cfws':
res.pop(0)
else:
if res[0][0].token_type == 'cfws':
res[0] = TokenList(res[0][1:])
if res[-1].token_type == 'cfws':
res.pop()
else:
if res[-1][-1].token_type == 'cfws':
res[-1] = TokenList(res[-1][:-1])
return res.value
@property
def value(self):
quote = False
if self.defects:
quote = True
else:
for x in self:
if x.token_type == 'quoted-string':
quote = True
if quote:
pre = post = ''
if self[0].token_type=='cfws' or self[0][0].token_type=='cfws':
pre = ' '
if self[-1].token_type=='cfws' or self[-1][-1].token_type=='cfws':
post = ' '
return pre+quote_string(self.display_name)+post
else:
return super().value
class LocalPart(TokenList):
token_type = 'local-part'
@property
def value(self):
if self[0].token_type == "quoted-string":
return self[0].quoted_value
else:
return self[0].value
@property
def local_part(self):
# Strip whitespace from front, back, and around dots.
res = [DOT]
last = DOT
last_is_tl = False
for tok in self[0] + [DOT]:
if tok.token_type == 'cfws':
continue
if (last_is_tl and tok.token_type == 'dot' and
last[-1].token_type == 'cfws'):
res[-1] = TokenList(last[:-1])
is_tl = isinstance(tok, TokenList)
if (is_tl and last.token_type == 'dot' and
tok[0].token_type == 'cfws'):
res.append(TokenList(tok[1:]))
else:
res.append(tok)
last = res[-1]
last_is_tl = is_tl
res = TokenList(res[1:-1])
return res.value
class DomainLiteral(TokenList):
token_type = 'domain-literal'
@property
def domain(self):
return ''.join(super().value.split())
@property
def ip(self):
for x in self:
if x.token_type == 'ptext':
return x.value
class MIMEVersion(TokenList):
token_type = 'mime-version'
major = None
minor = None
class Parameter(TokenList):
token_type = 'parameter'
sectioned = False
extended = False
charset = 'us-ascii'
@property
def section_number(self):
# Because the first token, the attribute (name) eats CFWS, the second
# token is always the section if there is one.
return self[1].number if self.sectioned else 0
@property
def param_value(self):
# This is part of the "handle quoted extended parameters" hack.
for token in self:
if token.token_type == 'value':
return token.stripped_value
if token.token_type == 'quoted-string':
for token in token:
if token.token_type == 'bare-quoted-string':
for token in token:
if token.token_type == 'value':
return token.stripped_value
return ''
class InvalidParameter(Parameter):
token_type = 'invalid-parameter'
class Attribute(TokenList):
token_type = 'attribute'
@property
def stripped_value(self):
for token in self:
if token.token_type.endswith('attrtext'):
return token.value
class Section(TokenList):
token_type = 'section'
number = None
class Value(TokenList):
token_type = 'value'
@property
def stripped_value(self):
token = self[0]
if token.token_type == 'cfws':
token = self[1]
if token.token_type.endswith(
('quoted-string', 'attribute', 'extended-attribute')):
return token.stripped_value
return self.value
class MimeParameters(TokenList):
token_type = 'mime-parameters'
@property
def params(self):
# The RFC specifically states that the ordering of parameters is not
# guaranteed and may be reordered by the transport layer. So we have
# to assume the RFC 2231 pieces can come in any order. However, we
# output them in the order that we first see a given name, which gives
# us a stable __str__.
params = OrderedDict()
for token in self:
if not token.token_type.endswith('parameter'):
continue
if token[0].token_type != 'attribute':
continue
name = token[0].value.strip()
if name not in params:
params[name] = []
params[name].append((token.section_number, token))
for name, parts in params.items():
parts = sorted(parts)
# XXX: there might be more recovery we could do here if, for
# example, this is really a case of a duplicate attribute name.
value_parts = []
charset = parts[0][1].charset
for i, (section_number, param) in enumerate(parts):
if section_number != i:
param.defects.append(errors.InvalidHeaderDefect(
"inconsistent multipart parameter numbering"))
value = param.param_value
if param.extended:
try:
value = urllib.parse.unquote_to_bytes(value)
except UnicodeEncodeError:
# source had surrogate escaped bytes. What we do now
# is a bit of an open question. I'm not sure this is
# the best choice, but it is what the old algorithm did
value = urllib.parse.unquote(value, encoding='latin-1')
else:
try:
value = value.decode(charset, 'surrogateescape')
except LookupError:
# XXX: there should really be a custom defect for
# unknown character set to make it easy to find,
# because otherwise unknown charset is a silent
# failure.
value = value.decode('us-ascii', 'surrogateescape')
if utils._has_surrogates(value):
param.defects.append(errors.UndecodableBytesDefect())
value_parts.append(value)
value = ''.join(value_parts)
yield name, value
def __str__(self):
params = []
for name, value in self.params:
if value:
params.append('{}={}'.format(name, quote_string(value)))
else:
params.append(name)
params = '; '.join(params)
return ' ' + params if params else ''
class ParameterizedHeaderValue(TokenList):
@property
def params(self):
for token in reversed(self):
if token.token_type == 'mime-parameters':
return token.params
return {}
@property
def parts(self):
if self and self[-1].token_type == 'mime-parameters':
# We don't want to start a new line if all of the params don't fit
# after the value, so unwrap the parameter list.
return TokenList(self[:-1] + self[-1])
return TokenList(self).parts
class ContentType(ParameterizedHeaderValue):
token_type = 'content-type'
maintype = 'text'
subtype = 'plain'
class ContentDisposition(ParameterizedHeaderValue):
token_type = 'content-disposition'
content_disposition = None
class ContentTransferEncoding(TokenList):
token_type = 'content-transfer-encoding'
cte = '7bit'
class HeaderLabel(TokenList):
token_type = 'header-label'
class Header(TokenList):
token_type = 'header'
def _fold(self, folded):
folded.append(str(self.pop(0)))
folded.lastlen = len(folded.current[0])
# The first line of the header is different from all others: we don't
# want to start a new object on a new line if it has any fold points in
# it that would allow part of it to be on the first header line.
# Further, if the first fold point would fit on the new line, we want
# to do that, but if it doesn't we want to put it on the first line.
# Folded supports this via the stickyspace attribute. If this
# attribute is not None, it does the special handling.
folded.stickyspace = str(self.pop(0)) if self[0].token_type == 'cfws' else ''
rest = self.pop(0)
if self:
raise ValueError("Malformed Header token list")
rest._fold(folded)
#
# Terminal classes and instances
#
class Terminal(str):
def __new__(cls, value, token_type):
self = super().__new__(cls, value)
self.token_type = token_type
self.defects = []
return self
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super().__repr__())
@property
def all_defects(self):
return list(self.defects)
def _pp(self, indent=''):
return ["{}{}/{}({}){}".format(
indent,
self.__class__.__name__,
self.token_type,
super().__repr__(),
'' if not self.defects else ' {}'.format(self.defects),
)]
def cte_encode(self, charset, policy):
value = str(self)
try:
value.encode('us-ascii')
return value
except UnicodeEncodeError:
return _ew.encode(value, charset)
def pop_trailing_ws(self):
# This terminates the recursion.
return None
def pop_leading_fws(self):
# This terminates the recursion.
return None
@property
def comments(self):
return []
def has_leading_comment(self):
return False
def __getnewargs__(self):
return(str(self), self.token_type)
class WhiteSpaceTerminal(Terminal):
@property
def value(self):
return ' '
def startswith_fws(self):
return True
has_fws = True
class ValueTerminal(Terminal):
@property
def value(self):
return self
def startswith_fws(self):
return False
has_fws = False
def as_encoded_word(self, charset):
return _ew.encode(str(self), charset)
class EWWhiteSpaceTerminal(WhiteSpaceTerminal):
@property
def value(self):
return ''
@property
def encoded(self):
return self[:]
def __str__(self):
return ''
has_fws = True
# XXX these need to become classes and used as instances so
# that a program can't change them in a parse tree and screw
# up other parse trees. Maybe should have tests for that, too.
DOT = ValueTerminal('.', 'dot')
ListSeparator = ValueTerminal(',', 'list-separator')
RouteComponentMarker = ValueTerminal('@', 'route-component-marker')
#
# Parser
#
# Parse strings according to RFC822/2047/2822/5322 rules.
#
# This is a stateless parser. Each get_XXX function accepts a string and
# returns either a Terminal or a TokenList representing the RFC object named
# by the method and a string containing the remaining unparsed characters
# from the input. Thus a parser method consumes the next syntactic construct
# of a given type and returns a token representing the construct plus the
# unparsed remainder of the input string.
#
# For example, if the first element of a structured header is a 'phrase',
# then:
#
# phrase, value = get_phrase(value)
#
# returns the complete phrase from the start of the string value, plus any
# characters left in the string after the phrase is removed.
_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
_non_atom_end_matcher = re.compile(r"[^{}]+".format(
''.join(ATOM_ENDS).replace('\\','\\\\').replace(']', r'\]'))).match
_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
_non_token_end_matcher = re.compile(r"[^{}]+".format(
''.join(TOKEN_ENDS).replace('\\','\\\\').replace(']', r'\]'))).match
_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
''.join(ATTRIBUTE_ENDS).replace('\\','\\\\').replace(']', r'\]'))).match
_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
''.join(EXTENDED_ATTRIBUTE_ENDS).replace(
'\\','\\\\').replace(']', r'\]'))).match
def _validate_xtext(xtext):
"""If input token contains ASCII non-printables, register a defect."""
non_printables = _non_printable_finder(xtext)
if non_printables:
xtext.defects.append(errors.NonPrintableDefect(non_printables))
if utils._has_surrogates(xtext):
xtext.defects.append(errors.UndecodableBytesDefect(
"Non-ASCII characters found in header token"))
def _get_ptext_to_endchars(value, endchars):
"""Scan printables/quoted-pairs until endchars and return unquoted ptext.
This function turns a run of qcontent, ccontent-without-comments, or
dtext-with-quoted-printables into a single string by unquoting any
quoted printables. It returns the string, the remaining value, and
a flag that is True iff there were any quoted printables decoded.
"""
fragment, *remainder = _wsp_splitter(value, 1)
vchars = []
escape = False
had_qp = False
for pos in range(len(fragment)):
if fragment[pos] == '\\':
if escape:
escape = False
had_qp = True
else:
escape = True
continue
if escape:
escape = False
elif fragment[pos] in endchars:
break
vchars.append(fragment[pos])
else:
pos = pos + 1
return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
def get_fws(value):
"""FWS = 1*WSP
This isn't the RFC definition. We're using fws to represent tokens where
folding can be done, but when we are parsing the *un*folding has already
been done so we don't need to watch out for CRLF.
"""
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
return fws, newvalue
def get_encoded_word(value):
""" encoded-word = "=?" charset "?" encoding "?" encoded-text "?="
"""
ew = EncodedWord()
if not value.startswith('=?'):
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
tok, *remainder = value[2:].split('?=', 1)
if tok == value[2:]:
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
remstr = ''.join(remainder)
if len(remstr) > 1 and remstr[0] in hexdigits and remstr[1] in hexdigits:
# The ? after the CTE was followed by an encoded word escape (=XX).
rest, *remainder = remstr.split('?=', 1)
tok = tok + '?=' + rest
if len(tok.split()) > 1:
ew.defects.append(errors.InvalidHeaderDefect(
"whitespace inside encoded word"))
ew.cte = value
value = ''.join(remainder)
try:
text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
except ValueError:
raise errors.HeaderParseError(
"encoded word format invalid: '{}'".format(ew.cte))
ew.charset = charset
ew.lang = lang
ew.defects.extend(defects)
while text:
if text[0] in WSP:
token, text = get_fws(text)
ew.append(token)
continue
chars, *remainder = _wsp_splitter(text, 1)
vtext = ValueTerminal(chars, 'vtext')
_validate_xtext(vtext)
ew.append(vtext)
text = ''.join(remainder)
return ew, value
def get_unstructured(value):
"""unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
obs-unstruct = *((*LF *CR *(obs-utext *LF *CR)) / FWS)
obs-utext = %d0 / obs-NO-WS-CTL / LF / CR
obs-NO-WS-CTL is control characters except WSP/CR/LF.
So, basically, we have printable runs, plus control characters or nulls in
the obsolete syntax, separated by whitespace. Since RFC 2047 uses the
obsolete syntax in its specification, but requires whitespace on either
side of the encoded words, I can see no reason to need to separate the
non-printable-non-whitespace from the printable runs if they occur, so we
parse this into xtext tokens separated by WSP tokens.
Because an 'unstructured' value must by definition constitute the entire
value, this 'get' routine does not return a remaining value, only the
parsed TokenList.
"""
# XXX: but what about bare CR and LF? They might signal the start or
# end of an encoded word. YAGNI for now, since our current parsers
# will never send us strings with bare CR or LF.
unstructured = UnstructuredTokenList()
while value:
if value[0] in WSP:
token, value = get_fws(value)
unstructured.append(token)
continue
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: Need to figure out how to register defects when
# appropriate here.
pass
else:
have_ws = True
if len(unstructured) > 0:
if unstructured[-1].token_type != 'fws':
unstructured.defects.append(errors.InvalidHeaderDefect(
"missing whitespace before encoded word"))
have_ws = False
if have_ws and len(unstructured) > 1:
if unstructured[-2].token_type == 'encoded-word':
unstructured[-1] = EWWhiteSpaceTerminal(
unstructured[-1], 'fws')
unstructured.append(token)
continue
tok, *remainder = _wsp_splitter(value, 1)
vtext = ValueTerminal(tok, 'vtext')
_validate_xtext(vtext)
unstructured.append(vtext)
value = ''.join(remainder)
return unstructured
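# For example (illustrative, not a doctest): an encoded word preceded by a
# plain word parses into vtext/fws/encoded-word tokens, and the TokenList
# value carries the decoded text:
#
#     tl = get_unstructured('Hello =?utf-8?q?W=C3=B6rld?=')
#     [t.token_type for t in tl]   # ['vtext', 'fws', 'encoded-word']
#     tl.value                     # roughly 'Hello Wörld'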
def get_qp_ctext(value):
"""ctext = <printable ascii except \ ( )>
This is not the RFC ctext, since we are handling nested comments in comment
and unquoting quoted-pairs here. We allow anything except the '()'
characters, but if we find any ASCII other than the RFC defined printable
ASCII, a NonPrintableDefect is added to the token's defects list. Since
quoted pairs are converted to their unquoted values, what is returned is
a 'ptext' token. In this case it is a WhiteSpaceTerminal, so its value
is ' '.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '()')
ptext = WhiteSpaceTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_qcontent(value):
"""qcontent = qtext / quoted-pair
We allow anything except the DQUOTE character, but if we find any ASCII
other than the RFC defined printable ASCII, a NonPrintableDefect is
added to the token's defects list. Any quoted pairs are converted to their
unquoted values, so what is returned is a 'ptext' token. In this case it
is a ValueTerminal.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '"')
ptext = ValueTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_atext(value):
"""atext = <matches _atext_matcher>
We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
the token's defects list if we find non-atext characters.
"""
m = _non_atom_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected atext but found '{}'".format(value))
atext = m.group()
value = value[len(atext):]
atext = ValueTerminal(atext, 'atext')
_validate_xtext(atext)
return atext, value
def get_bare_quoted_string(value):
"""bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE
A quoted-string without the leading or trailing white space. Its
value is the text between the quote marks, with whitespace
preserved and quoted pairs decoded.
"""
if value[0] != '"':
raise errors.HeaderParseError(
"expected '\"' but found '{}'".format(value))
bare_quoted_string = BareQuotedString()
value = value[1:]
while value and value[0] != '"':
if value[0] in WSP:
token, value = get_fws(value)
elif value[:2] == '=?':
try:
token, value = get_encoded_word(value)
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"encoded word inside quoted string"))
except errors.HeaderParseError:
token, value = get_qcontent(value)
else:
token, value = get_qcontent(value)
bare_quoted_string.append(token)
if not value:
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"end of header inside quoted string"))
return bare_quoted_string, value
return bare_quoted_string, value[1:]
def get_comment(value):
"""comment = "(" *([FWS] ccontent) [FWS] ")"
ccontent = ctext / quoted-pair / comment
We handle nested comments here, and quoted-pair in our qp-ctext routine.
"""
if value and value[0] != '(':
raise errors.HeaderParseError(
"expected '(' but found '{}'".format(value))
comment = Comment()
value = value[1:]
while value and value[0] != ")":
if value[0] in WSP:
token, value = get_fws(value)
elif value[0] == '(':
token, value = get_comment(value)
else:
token, value = get_qp_ctext(value)
comment.append(token)
if not value:
comment.defects.append(errors.InvalidHeaderDefect(
"end of header inside comment"))
return comment, value
return comment, value[1:]
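# For example (illustrative): nested comments parse recursively into Comment
# tokens; the comment's 'content' keeps the inner text, while its semantic
# value collapses to a single space:
#
#     comment, rest = get_comment('(outer (inner) text) tail')
#     comment.content   # 'outer (inner) text'
#     comment.value     # ' '
#     rest              # ' tail'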
def get_cfws(value):
"""CFWS = (1*([FWS] comment) [FWS]) / FWS
"""
cfws = CFWSList()
while value and value[0] in CFWS_LEADER:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_comment(value)
cfws.append(token)
return cfws, value
def get_quoted_string(value):
"""quoted-string = [CFWS] <bare-quoted-string> [CFWS]
'bare-quoted-string' is an intermediate class defined by this
parser and not by the RFC grammar. It is the quoted string
without any attached CFWS.
"""
quoted_string = QuotedString()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
token, value = get_bare_quoted_string(value)
quoted_string.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
return quoted_string, value
def get_atom(value):
"""atom = [CFWS] 1*atext [CFWS]
An atom could be an rfc2047 encoded word.
"""
atom = Atom()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
if value and value[0] in ATOM_ENDS:
raise errors.HeaderParseError(
"expected atom but found '{}'".format(value))
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_atext(value)
else:
token, value = get_atext(value)
atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
return atom, value
def get_dot_atom_text(value):
""" dot-text = 1*atext *("." 1*atext)
"""
dot_atom_text = DotAtomText()
if not value or value[0] in ATOM_ENDS:
raise errors.HeaderParseError("expected atom at the start of "
"dot-atom-text but found '{}'".format(value))
while value and value[0] not in ATOM_ENDS:
token, value = get_atext(value)
dot_atom_text.append(token)
if value and value[0] == '.':
dot_atom_text.append(DOT)
value = value[1:]
if dot_atom_text[-1] is DOT:
raise errors.HeaderParseError("expected atom at end of dot-atom-text "
"but found '{}'".format('.'+value))
return dot_atom_text, value
def get_dot_atom(value):
""" dot-atom = [CFWS] dot-atom-text [CFWS]
Any place we can have a dot atom, we could instead have an rfc2047 encoded
word.
"""
dot_atom = DotAtom()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_dot_atom_text(value)
else:
token, value = get_dot_atom_text(value)
dot_atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
return dot_atom, value
def get_word(value):
"""word = atom / quoted-string
Either atom or quoted-string may start with CFWS. We have to peel off this
CFWS first to determine which type of word to parse. Afterward we splice
the leading CFWS, if any, into the parsed sub-token.
If neither an atom nor a quoted-string is found before the next special, a
HeaderParseError is raised.
The token returned is either an Atom or a QuotedString, as appropriate.
This means the 'word' level of the formal grammar is not represented in the
parse tree; this is because having that extra layer when manipulating the
parse tree is more confusing than it is helpful.
"""
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
else:
leader = None
if value[0]=='"':
token, value = get_quoted_string(value)
elif value[0] in SPECIALS:
raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
"but found '{}'".format(value))
else:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
return token, value
def get_phrase(value):
""" phrase = 1*word / obs-phrase
obs-phrase = word *(word / "." / CFWS)
This means a phrase can be a sequence of words, periods, and CFWS in any
order as long as it starts with at least one word. If anything other than
words is detected, an ObsoleteHeaderDefect is added to the token's defect
list. We also accept a phrase that starts with CFWS followed by a dot;
this is registered as an InvalidHeaderDefect, since it is not supported by
even the obsolete grammar.
"""
phrase = Phrase()
try:
token, value = get_word(value)
phrase.append(token)
except errors.HeaderParseError:
phrase.defects.append(errors.InvalidHeaderDefect(
"phrase does not start with word"))
while value and value[0] not in PHRASE_ENDS:
if value[0]=='.':
phrase.append(DOT)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"period in 'phrase'"))
value = value[1:]
else:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"comment found without atom"))
else:
raise
phrase.append(token)
return phrase, value
def get_local_part(value):
""" local-part = dot-atom / quoted-string / obs-local-part
"""
local_part = LocalPart()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected local-part but found '{}'".format(value))
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] != '\\' and value[0] in PHRASE_ENDS:
raise
token = TokenList()
if leader is not None:
token[:0] = [leader]
local_part.append(token)
if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
obs_local_part, value = get_obs_local_part(str(local_part) + value)
if obs_local_part.token_type == 'invalid-obs-local-part':
local_part.defects.append(errors.InvalidHeaderDefect(
"local-part is not dot-atom, quoted-string, or obs-local-part"))
else:
local_part.defects.append(errors.ObsoleteHeaderDefect(
"local-part is not a dot-atom (contains CFWS)"))
local_part[0] = obs_local_part
try:
local_part.value.encode('ascii')
except UnicodeEncodeError:
local_part.defects.append(errors.NonASCIILocalPartDefect(
"local-part contains non-ASCII characters)"))
return local_part, value
def get_obs_local_part(value):
""" obs-local-part = word *("." word)
"""
obs_local_part = ObsLocalPart()
last_non_ws_was_dot = False
while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
if value[0] == '.':
if last_non_ws_was_dot:
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"invalid repeated '.'"))
obs_local_part.append(DOT)
last_non_ws_was_dot = True
value = value[1:]
continue
elif value[0]=='\\':
obs_local_part.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"'\\' character outside of quoted-string/ccontent"))
last_non_ws_was_dot = False
continue
if obs_local_part and obs_local_part[-1].token_type != 'dot':
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"missing '.' between words"))
try:
token, value = get_word(value)
last_non_ws_was_dot = False
except errors.HeaderParseError:
if value[0] not in CFWS_LEADER:
raise
token, value = get_cfws(value)
obs_local_part.append(token)
if (obs_local_part[0].token_type == 'dot' or
obs_local_part[0].token_type=='cfws' and
obs_local_part[1].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid leading '.' in local part"))
if (obs_local_part[-1].token_type == 'dot' or
obs_local_part[-1].token_type=='cfws' and
obs_local_part[-2].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid trailing '.' in local part"))
if obs_local_part.defects:
obs_local_part.token_type = 'invalid-obs-local-part'
return obs_local_part, value
def get_dtext(value):
""" dtext = <printable ascii except \ [ ]> / obs-dtext
obs-dtext = obs-NO-WS-CTL / quoted-pair
We allow anything except the excluded characters, but if we find any
    ASCII other than the RFC-defined printable ASCII a NonPrintableDefect is
added to the token's defects list. Quoted pairs are converted to their
unquoted values, so what is returned is a ptext token, in this case a
ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
added to the returned token's defect list.
"""
ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
ptext = ValueTerminal(ptext, 'ptext')
if had_qp:
ptext.defects.append(errors.ObsoleteHeaderDefect(
"quoted printable found in domain-literal"))
_validate_xtext(ptext)
return ptext, value
def _check_for_early_dl_end(value, domain_literal):
if value:
return False
domain_literal.append(errors.InvalidHeaderDefect(
"end of input inside domain-literal"))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
return True
def get_domain_literal(value):
""" domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS]
"""
domain_literal = DomainLiteral()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
if not value:
raise errors.HeaderParseError("expected domain-literal")
if value[0] != '[':
raise errors.HeaderParseError("expected '[' at start of domain-literal "
"but found '{}'".format(value))
value = value[1:]
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
token, value = get_dtext(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] != ']':
raise errors.HeaderParseError("expected ']' at end of domain-literal "
"but found '{}'".format(value))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
return domain_literal, value
def get_domain(value):
""" domain = dot-atom / domain-literal / obs-domain
        obs-domain = atom *("." atom)
"""
domain = Domain()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected domain but found '{}'".format(value))
if value[0] == '[':
token, value = get_domain_literal(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
return domain, value
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
if value and value[0] == '.':
domain.defects.append(errors.ObsoleteHeaderDefect(
"domain is not a dot-atom (contains CFWS)"))
if domain[0].token_type == 'dot-atom':
domain[:] = domain[0]
while value and value[0] == '.':
domain.append(DOT)
token, value = get_atom(value[1:])
domain.append(token)
return domain, value
def get_addr_spec(value):
""" addr-spec = local-part "@" domain
"""
addr_spec = AddrSpec()
token, value = get_local_part(value)
addr_spec.append(token)
if not value or value[0] != '@':
addr_spec.defects.append(errors.InvalidHeaderDefect(
"add-spec local part with no domain"))
return addr_spec, value
addr_spec.append(ValueTerminal('@', 'address-at-symbol'))
token, value = get_domain(value[1:])
addr_spec.append(token)
return addr_spec, value
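# Illustrative sketch (not part of the original module): get_addr_spec returns
# an AddrSpec token plus whatever input it did not consume.  For a well-formed
# address one would expect something along these lines:
#
#   >>> addr_spec, rest = get_addr_spec('dinsdale@example.com')
#   >>> str(addr_spec), rest
#   ('dinsdale@example.com', '')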
def get_obs_route(value):
""" obs-route = obs-domain-list ":"
obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
Returns an obs-route token with the appropriate sub-tokens (that is,
there is no obs-domain-list in the parse tree).
"""
obs_route = ObsRoute()
while value and (value[0]==',' or value[0] in CFWS_LEADER):
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
elif value[0] == ',':
obs_route.append(ListSeparator)
value = value[1:]
if not value or value[0] != '@':
raise errors.HeaderParseError(
"expected obs-route domain but found '{}'".format(value))
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
while value and value[0]==',':
obs_route.append(ListSeparator)
value = value[1:]
if not value:
break
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
if value[0] == '@':
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
if not value:
raise errors.HeaderParseError("end of header while parsing obs-route")
if value[0] != ':':
raise errors.HeaderParseError( "expected ':' marking end of "
"obs-route but found '{}'".format(value))
obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
return obs_route, value[1:]
def get_angle_addr(value):
""" angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS]
"""
angle_addr = AngleAddr()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
if not value or value[0] != '<':
raise errors.HeaderParseError(
"expected angle-addr but found '{}'".format(value))
angle_addr.append(ValueTerminal('<', 'angle-addr-start'))
value = value[1:]
# Although it is not legal per RFC5322, SMTP uses '<>' in certain
# circumstances.
if value[0] == '>':
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
angle_addr.defects.append(errors.InvalidHeaderDefect(
"null addr-spec in angle-addr"))
value = value[1:]
return angle_addr, value
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
try:
token, value = get_obs_route(value)
angle_addr.defects.append(errors.ObsoleteHeaderDefect(
"obsolete route specification in angle-addr"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected addr-spec or obs-route but found '{}'".format(value))
angle_addr.append(token)
token, value = get_addr_spec(value)
angle_addr.append(token)
if value and value[0] == '>':
value = value[1:]
else:
angle_addr.defects.append(errors.InvalidHeaderDefect(
"missing trailing '>' on angle-addr"))
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
return angle_addr, value
def get_display_name(value):
""" display-name = phrase
Because this is simply a name-rule, we don't return a display-name
token containing a phrase, but rather a display-name token with
the content of the phrase.
"""
display_name = DisplayName()
token, value = get_phrase(value)
display_name.extend(token[:])
display_name.defects = token.defects[:]
return display_name, value
def get_name_addr(value):
""" name-addr = [display-name] angle-addr
"""
name_addr = NameAddr()
# Both the optional display name and the angle-addr can start with cfws.
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(leader))
if value[0] != '<':
if value[0] in PHRASE_ENDS:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(value))
token, value = get_display_name(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(token))
if leader is not None:
token[0][:0] = [leader]
leader = None
name_addr.append(token)
token, value = get_angle_addr(value)
if leader is not None:
token[:0] = [leader]
name_addr.append(token)
return name_addr, value
def get_mailbox(value):
""" mailbox = name-addr / addr-spec
"""
# The only way to figure out if we are dealing with a name-addr or an
# addr-spec is to try parsing each one.
mailbox = Mailbox()
try:
token, value = get_name_addr(value)
except errors.HeaderParseError:
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected mailbox but found '{}'".format(value))
if any(isinstance(x, errors.InvalidHeaderDefect)
for x in token.all_defects):
mailbox.token_type = 'invalid-mailbox'
mailbox.append(token)
return mailbox, value
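# Illustrative sketch (not part of the original module): get_mailbox accepts
# either form of mailbox, so both of the following would be expected to parse,
# the first as a name-addr and the second as a bare addr-spec:
#
#   >>> get_mailbox('Fred Flintstone <fred@example.com>')[0].token_type
#   'mailbox'
#   >>> get_mailbox('fred@example.com')[0].token_type
#   'mailbox'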
def get_invalid_mailbox(value, endchars):
""" Read everything up to one of the chars in endchars.
This is outside the formal grammar. The InvalidMailbox TokenList that is
returned acts like a Mailbox, but the data attributes are None.
"""
invalid_mailbox = InvalidMailbox()
while value and value[0] not in endchars:
if value[0] in PHRASE_ENDS:
invalid_mailbox.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_mailbox.append(token)
return invalid_mailbox, value
def get_mailbox_list(value):
""" mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list
obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS])
For this routine we go outside the formal grammar in order to improve error
handling. We recognize the end of the mailbox list only at the end of the
value or at a ';' (the group terminator). This is so that we can turn
invalid mailboxes into InvalidMailbox tokens and continue parsing any
remaining valid mailboxes. We also allow all mailbox entries to be null,
and this condition is handled appropriately at a higher level.
"""
mailbox_list = MailboxList()
while value and value[0] != ';':
try:
token, value = get_mailbox(value)
mailbox_list.append(token)
except errors.HeaderParseError:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] in ',;':
mailbox_list.append(leader)
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
elif value[0] == ',':
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] not in ',;':
# Crap after mailbox; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = mailbox_list[-1]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',;')
mailbox.extend(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] == ',':
mailbox_list.append(ListSeparator)
value = value[1:]
return mailbox_list, value
def get_group_list(value):
""" group-list = mailbox-list / CFWS / obs-group-list
obs-group-list = 1*([CFWS] ",") [CFWS]
"""
group_list = GroupList()
if not value:
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header before group-list"))
return group_list, value
leader = None
if value and value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
# This should never happen in email parsing, since CFWS-only is a
# legal alternative to group-list in a group, which is the only
# place group-list appears.
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header in group-list"))
group_list.append(leader)
return group_list, value
if value[0] == ';':
group_list.append(leader)
return group_list, value
token, value = get_mailbox_list(value)
if len(token.all_mailboxes)==0:
if leader is not None:
group_list.append(leader)
group_list.extend(token)
group_list.defects.append(errors.ObsoleteHeaderDefect(
"group-list with empty entries"))
return group_list, value
if leader is not None:
token[:0] = [leader]
group_list.append(token)
return group_list, value
def get_group(value):
""" group = display-name ":" [group-list] ";" [CFWS]
"""
group = Group()
token, value = get_display_name(value)
if not value or value[0] != ':':
raise errors.HeaderParseError("expected ':' at end of group "
"display name but found '{}'".format(value))
group.append(token)
group.append(ValueTerminal(':', 'group-display-name-terminator'))
value = value[1:]
if value and value[0] == ';':
group.append(ValueTerminal(';', 'group-terminator'))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
if value[0] != ';':
raise errors.HeaderParseError(
"expected ';' at end of group but found {}".format(value))
group.append(ValueTerminal(';', 'group-terminator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value
def get_address(value):
""" address = mailbox / group
Note that counter-intuitively, an address can be either a single address or
a list of addresses (a group). This is why the returned Address object has
a 'mailboxes' attribute which treats a single address as a list of length
    one. When you need to differentiate between the two cases, extract the single
element, which is either a mailbox or a group token.
"""
# The formal grammar isn't very helpful when parsing an address. mailbox
# and group, especially when allowing for obsolete forms, start off very
# similarly. It is only when you reach one of @, <, or : that you know
# what you've got. So, we try each one in turn, starting with the more
# likely of the two. We could perhaps make this more efficient by looking
# for a phrase and then branching based on the next character, but that
# would be a premature optimization.
address = Address()
try:
token, value = get_group(value)
except errors.HeaderParseError:
try:
token, value = get_mailbox(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected address but found '{}'".format(value))
address.append(token)
return address, value
def get_address_list(value):
""" address_list = (address *("," address)) / obs-addr-list
obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])
We depart from the formal grammar here by continuing to parse until the end
of the input, assuming the input to be entirely composed of an
address-list. This is always true in email parsing, and allows us
to skip invalid addresses to parse additional valid ones.
"""
address_list = AddressList()
while value:
try:
token, value = get_address(value)
address_list.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] == ',':
address_list.append(leader)
address_list.defects.append(errors.ObsoleteHeaderDefect(
"address-list entry with no content"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
elif value[0] == ',':
address_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in address-list"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value and value[0] != ',':
# Crap after address; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = address_list[-1][0]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',')
mailbox.extend(token)
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value: # Must be a , at this point.
address_list.append(ValueTerminal(',', 'list-separator'))
value = value[1:]
return address_list, value
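# Illustrative sketch (not part of the original module): because
# get_address_list consumes its entire input, parsing a complete header value
# is expected to look roughly like this:
#
#   >>> address_list, rest = get_address_list('Fred <fred@example.com>, joe@example.com')
#   >>> len(address_list.addresses), rest
#   (2, '')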
#
# XXX: As I begin to add additional header parsers, I'm realizing we probably
# have two level of parser routines: the get_XXX methods that get a token in
# the grammar, and parse_XXX methods that parse an entire field value. So
# get_address_list above should really be a parse_ method, as probably should
# be get_unstructured.
#
def parse_mime_version(value):
""" mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]
"""
# The [CFWS] is implicit in the RFC 2045 BNF.
# XXX: This routine is a bit verbose, should factor out a get_int method.
mime_version = MIMEVersion()
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Missing MIME version number (eg: 1.0)"))
return mime_version
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Expected MIME version number but found only CFWS"))
digits = ''
while value and value[0] != '.' and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME major version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.major = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value or value[0] != '.':
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
if value:
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
mime_version.append(ValueTerminal('.', 'version-separator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
return mime_version
digits = ''
while value and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME minor version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.minor = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if value:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Excess non-CFWS text after MIME version"))
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
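# Illustrative sketch (not part of the original module): parse_mime_version
# consumes the whole value and records the numeric components on the token:
#
#   >>> mv = parse_mime_version('1.0')
#   >>> mv.major, mv.minor
#   (1, 0)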
def get_invalid_parameter(value):
""" Read everything up to the next ';'.
This is outside the formal grammar. The InvalidParameter TokenList that is
returned acts like a Parameter, but the data attributes are None.
"""
invalid_parameter = InvalidParameter()
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
invalid_parameter.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_parameter.append(token)
return invalid_parameter, value
def get_ttext(value):
"""ttext = <matches _ttext_matcher>
We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
defects list if we find non-ttext characters. We also register defects for
*any* non-printables even though the RFC doesn't exclude all of them,
because we follow the spirit of RFC 5322.
"""
m = _non_token_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected ttext but found '{}'".format(value))
ttext = m.group()
value = value[len(ttext):]
ttext = ValueTerminal(ttext, 'ttext')
_validate_xtext(ttext)
return ttext, value
def get_token(value):
"""token = [CFWS] 1*ttext [CFWS]
The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or
tspecials. We also exclude tabs even though the RFC doesn't.
The RFC implies the CFWS but is not explicit about it in the BNF.
"""
mtoken = Token()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
if value and value[0] in TOKEN_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_ttext(value)
mtoken.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
return mtoken, value
def get_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character)
We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
token's defects list if we find non-attrtext characters. We also register
defects for *any* non-printables even though the RFC doesn't exclude all of
them, because we follow the spirit of RFC 5322.
"""
m = _non_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_attribute(value):
""" [CFWS] 1*attrtext [CFWS]
This version of the BNF makes the CFWS explicit, and as usual we use a
value terminal for the actual run of characters. The RFC equivalent of
attrtext is the token characters, with the subtraction of '*', "'", and '%'.
We include tab in the excluded set just as we do for token.
"""
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_extended_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')
This is a special parsing routine so that we get a value that
includes % escapes as a single string (which we decode as a single
string later).
"""
m = _non_extended_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected extended attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'extended-attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_extended_attribute(value):
""" [CFWS] 1*extended_attrtext [CFWS]
This is like the non-extended version except we allow % characters, so that
we can pick up an encoded value as a single string.
"""
# XXX: should we have an ExtendedAttribute TokenList?
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_extended_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_section(value):
""" '*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS.
"""
section = Section()
if not value or value[0] != '*':
raise errors.HeaderParseError("Expected section but found {}".format(
value))
section.append(ValueTerminal('*', 'section-marker'))
value = value[1:]
if not value or not value[0].isdigit():
raise errors.HeaderParseError("Expected section number but "
"found {}".format(value))
digits = ''
while value and value[0].isdigit():
digits += value[0]
value = value[1:]
if digits[0] == '0' and digits != '0':
        section.defects.append(errors.InvalidHeaderDefect("section number "
"has an invalid leading 0"))
section.number = int(digits)
section.append(ValueTerminal(digits, 'digits'))
return section, value
def get_value(value):
""" quoted-string / attribute
"""
v = Value()
if not value:
raise errors.HeaderParseError("Expected value but found end of string")
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError("Expected value but found "
"only {}".format(leader))
if value[0] == '"':
token, value = get_quoted_string(value)
else:
token, value = get_extended_attribute(value)
if leader is not None:
token[:0] = [leader]
v.append(token)
return v, value
def get_parameter(value):
""" attribute [section] ["*"] [CFWS] "=" value
The CFWS is implied by the RFC but not made explicit in the BNF. This
simplified form of the BNF from the RFC is made to conform with the RFC BNF
through some extra checks. We do it this way because it makes both error
recovery and working with the resulting parse tree easier.
"""
# It is possible CFWS would also be implicitly allowed between the section
# and the 'extended-attribute' marker (the '*') , but we've never seen that
# in the wild and we will therefore ignore the possibility.
param = Parameter()
token, value = get_attribute(value)
param.append(token)
if not value or value[0] == ';':
param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
"name ({}) but no value".format(token)))
return param, value
if value[0] == '*':
try:
token, value = get_section(value)
param.sectioned = True
param.append(token)
except errors.HeaderParseError:
pass
if not value:
raise errors.HeaderParseError("Incomplete parameter")
if value[0] == '*':
param.append(ValueTerminal('*', 'extended-parameter-marker'))
value = value[1:]
param.extended = True
if value[0] != '=':
raise errors.HeaderParseError("Parameter not followed by '='")
param.append(ValueTerminal('=', 'parameter-separator'))
value = value[1:]
leader = None
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
param.append(token)
remainder = None
appendto = param
if param.extended and value and value[0] == '"':
# Now for some serious hackery to handle the common invalid case of
# double quotes around an extended value. We also accept (with defect)
# a value marked as encoded that isn't really.
qstring, remainder = get_quoted_string(value)
inner_value = qstring.stripped_value
semi_valid = False
if param.section_number == 0:
if inner_value and inner_value[0] == "'":
semi_valid = True
else:
token, rest = get_attrtext(inner_value)
if rest and rest[0] == "'":
semi_valid = True
else:
try:
token, rest = get_extended_attrtext(inner_value)
            except errors.HeaderParseError:
pass
else:
if not rest:
semi_valid = True
if semi_valid:
param.defects.append(errors.InvalidHeaderDefect(
"Quoted string value for extended parameter is invalid"))
param.append(qstring)
for t in qstring:
if t.token_type == 'bare-quoted-string':
t[:] = []
appendto = t
break
value = inner_value
else:
remainder = None
param.defects.append(errors.InvalidHeaderDefect(
"Parameter marked as extended but appears to have a "
"quoted string value that is non-encoded"))
if value and value[0] == "'":
token = None
else:
token, value = get_value(value)
if not param.extended or param.section_number > 0:
if not value or value[0] != "'":
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
param.defects.append(errors.InvalidHeaderDefect(
"Apparent initial-extended-value but attribute "
"was not marked as extended or was not initial section"))
if not value:
# Assume the charset/lang is missing and the token is the value.
param.defects.append(errors.InvalidHeaderDefect(
"Missing required charset/lang delimiters"))
appendto.append(token)
if remainder is None:
return param, value
else:
if token is not None:
for t in token:
if t.token_type == 'extended-attrtext':
break
            t.token_type = 'attrtext'
appendto.append(t)
param.charset = t.value
if value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {!r}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
value = value[1:]
if value and value[0] != "'":
token, value = get_attrtext(value)
appendto.append(token)
param.lang = token.value
if not value or value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
value = value[1:]
if remainder is not None:
# Treat the rest of value as bare quoted string content.
v = Value()
while value:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_qcontent(value)
v.append(token)
token = v
else:
token, value = get_value(value)
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
def parse_mime_parameters(value):
""" parameter *( ";" parameter )
That BNF is meant to indicate this routine should only be called after
finding and handling the leading ';'. There is no corresponding rule in
the formal RFC grammar, but it is more convenient for us for the set of
parameters to be treated as its own TokenList.
    This is a 'parse' routine because it consumes the remaining value, but it
would never be called to parse a full header. Instead it is called to
parse everything after the non-parameter value of a specific MIME header.
"""
mime_parameters = MimeParameters()
while value:
try:
token, value = get_parameter(value)
mime_parameters.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
mime_parameters.append(leader)
return mime_parameters
if value[0] == ';':
if leader is not None:
mime_parameters.append(leader)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter entry with no content"))
else:
token, value = get_invalid_parameter(value)
if leader:
token[:0] = [leader]
mime_parameters.append(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"invalid parameter {!r}".format(token)))
if value and value[0] != ';':
# Junk after the otherwise valid parameter. Mark it as
# invalid, but it will have a value.
param = mime_parameters[-1]
param.token_type = 'invalid-parameter'
token, value = get_invalid_parameter(value)
param.extend(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter with invalid trailing text {!r}".format(token)))
if value:
# Must be a ';' at this point.
mime_parameters.append(ValueTerminal(';', 'parameter-separator'))
value = value[1:]
return mime_parameters
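# Illustrative sketch (not part of the original module): parse_mime_parameters
# is handed everything after the first ';' of a MIME header, so a typical call
# would look like:
#
#   >>> params = parse_mime_parameters(' charset="utf-8"; format=flowed')
#   >>> sorted(name for name, value in params.params)
#   ['charset', 'format']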
def _find_mime_parameters(tokenlist, value):
"""Do our best to find the parameters in an invalid MIME header
"""
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
tokenlist.append(token)
if not value:
return
tokenlist.append(ValueTerminal(';', 'parameter-separator'))
tokenlist.append(parse_mime_parameters(value[1:]))
def parse_content_type_header(value):
""" maintype "/" subtype *( ";" parameter )
    The maintype and subtype are tokens. Theoretically they could
be checked against the official IANA list + x-token, but we
don't do that.
"""
ctype = ContentType()
recover = False
if not value:
ctype.defects.append(errors.HeaderMissingRequiredValue(
"Missing content type specification"))
return ctype
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content maintype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
    # XXX: If we really want to follow the formal grammar we should make
    # maintype and subtype specialized TokenLists here. Probably not worth it.
if not value or value[0] != '/':
ctype.defects.append(errors.InvalidHeaderDefect(
"Invalid content type"))
if value:
_find_mime_parameters(ctype, value)
return ctype
ctype.maintype = token.value.strip().lower()
ctype.append(ValueTerminal('/', 'content-type-separator'))
value = value[1:]
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content subtype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
ctype.subtype = token.value.strip().lower()
if not value:
return ctype
if value[0] != ';':
ctype.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content type, but "
"found {!r}".format(value)))
# The RFC requires that a syntactically invalid content-type be treated
# as text/plain. Perhaps we should postel this, but we should probably
# only do that if we were checking the subtype value against IANA.
del ctype.maintype, ctype.subtype
_find_mime_parameters(ctype, value)
return ctype
ctype.append(ValueTerminal(';', 'parameter-separator'))
ctype.append(parse_mime_parameters(value[1:]))
return ctype
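# Illustrative sketch (not part of the original module): the maintype/subtype
# attributes are populated from the parsed tokens, e.g.
#
#   >>> ct = parse_content_type_header('text/plain; charset="us-ascii"')
#   >>> ct.maintype, ct.subtype
#   ('text', 'plain')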
def parse_content_disposition_header(value):
""" disposition-type *( ";" parameter )
"""
disp_header = ContentDisposition()
if not value:
disp_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content disposition"))
return disp_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
        disp_header.defects.append(errors.InvalidHeaderDefect(
"Expected content disposition but found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(token)
disp_header.content_disposition = token.value.strip().lower()
if not value:
return disp_header
if value[0] != ';':
disp_header.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content disposition, but "
"found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(ValueTerminal(';', 'parameter-separator'))
disp_header.append(parse_mime_parameters(value[1:]))
return disp_header
def parse_content_transfer_encoding_header(value):
""" mechanism
"""
# We should probably validate the values, since the list is fixed.
cte_header = ContentTransferEncoding()
if not value:
cte_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content transfer encoding"))
return cte_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
        cte_header.defects.append(errors.InvalidHeaderDefect(
            "Expected content transfer encoding but found {!r}".format(value)))
else:
cte_header.append(token)
cte_header.cte = token.value.strip().lower()
if not value:
return cte_header
while value:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Extra text after content transfer encoding"))
if value[0] in PHRASE_ENDS:
cte_header.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
cte_header.append(token)
return cte_header
|
from matplotlib import pyplot as plt
import numpy as np
from matplotlib_venn import venn2, venn2_circles
def vdoc_plot(overlap):
plt.figure(figsize=(13,13), facecolor="white")
    # venn2 subsets syntax: (set1 only, set2 only, set1 & set2 overlap)
subset_tuple=(5,2,overlap)
v = venn2(subsets=subset_tuple, set_labels = ('A', 'B', 'C'))
v.get_patch_by_id('100').set_alpha(0.1)
v.get_patch_by_id('100').set_color('gray')
if overlap != 0:
v.get_patch_by_id('110').set_color('green')
v.get_patch_by_id('110').set_alpha(0.7)
        v.get_label_by_id('110').set_text('Consciousness')
v.get_patch_by_id('010').set_alpha(0.4)
v.get_label_by_id('100').set_text('Set of all qualia')
v.get_label_by_id('010').set_text('Set of all concurrent\n mental processes')
v.get_label_by_id('A').set_text('')
v.get_label_by_id('B').set_text('')
c = venn2_circles(subsets=subset_tuple)
c[0].set_ls('dotted')
c[1].set_ls('dashed')
plt.title("Venn Diagram of Consciousnes")
from matplotlib.transforms import Affine2D
ax = plt.gca()
center = [np.mean(ax.get_xlim()), np.mean(ax.get_ylim())]
t = Affine2D().rotate_deg_around(center[0], center[1], 90) + ax.transData
for v in ax.patches + ax.texts:
v.set_transform(t)
yl = ax.get_ylim()
plt.ylim(yl[0]-0.2, yl[1]+0.2)
plt.show()
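# Illustrative usage (an assumption, not present in the original script): the
# overlap argument sets the size of the shared region between the two sets.
if __name__ == "__main__":
    vdoc_plot(2)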
|
#!/usr/bin/env python3
class SelectBase:
API_PREFIX = None
DB_PREFIX = None
def __init__(self, db):
self.db = db
@classmethod
def set_defaults(klass, params):
"""
Responsible for setting default values of the query parameters.
"""
raise NotImplementedError
@classmethod
def sanitize_params(klass, params):
"""
Responsible for raising :py:exc:`AssertionError` in case of wrong input.
"""
raise NotImplementedError
@classmethod
def db_to_api(klass, row):
"""
Converts data from the database into the API format.
"""
raise NotImplementedError
@classmethod
def filter_params(klass, params, *, generator=False):
new_params = {}
for key, value in params.items():
prefix = klass.API_PREFIX
if generator is True:
prefix = "g" + prefix
if key.startswith(prefix):
new_key = key[len(prefix):]
new_params[new_key] = value
return new_params
def execute_sql(self, query, *, explain=False):
if explain is True:
from ws.db.database import explain
result = self.db.engine.execute(explain(query))
print(query)
for row in result:
print(row[0])
return self.db.engine.execute(query)
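# Minimal sketch of a hypothetical subclass (an assumption, not part of the
# original module), showing how the classmethod hooks and API_PREFIX are
# expected to be used:
#
#   class ListUsers(SelectBase):
#       API_PREFIX = "us"
#       DB_PREFIX = "user_"
#
#       @classmethod
#       def set_defaults(klass, params):
#           params.setdefault("uslimit", 10)
#
#       @classmethod
#       def sanitize_params(klass, params):
#           assert set(params) <= {"uslimit", "usfrom"}
#
#       @classmethod
#       def db_to_api(klass, row):
#           return {key[len(klass.DB_PREFIX):]: value for key, value in row.items()}
#
# ListUsers.filter_params({"uslimit": 5, "other": 1}) would then return
# {"limit": 5}, i.e. only the keys carrying the class's API prefix, with the
# prefix stripped.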
|
#!/usr/bin/env python
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import os
import sys
from os.path import join, exists, abspath
from os import environ
import glob
import argparse
import shutil
import platform
if environ.get('SPARK_HOME') is None:
print('SPARK_HOME not set')
sys.exit(1)
else:
spark_home = environ.get('SPARK_HOME')
spark_path = join(spark_home, 'bin', 'spark-submit')
# error help print
def print_usage_and_exit():
print('Usage: ./systemml-spark-submit.py -f <dml-filename> [arguments]')
sys.exit(1)
cparser = argparse.ArgumentParser(description='System-ML Spark Submit Script')
# SPARK-SUBMIT Options
cparser.add_argument('--master', default='local[*]', help='local, yarn-client, yarn-cluster', metavar='')
cparser.add_argument('--driver-memory', default='5G', help='Memory for driver (e.g. 512M)', metavar='')
cparser.add_argument('--num-executors', default='2', help='Number of executors to launch', metavar='')
cparser.add_argument('--executor-memory', default='2G', help='Memory per executor', metavar='')
cparser.add_argument('--executor-cores', default='1', help='Number of cores', metavar='')
cparser.add_argument('--conf', help='Spark configuration file', nargs='+', metavar='')
# SYSTEM-ML Options
cparser.add_argument('-nvargs', help='List of attributeName-attributeValue pairs', nargs='+', metavar='')
cparser.add_argument('-args', help='List of positional argument values', metavar='', nargs='+')
cparser.add_argument('-config', help='System-ML configuration file (e.g SystemML-config.xml)', metavar='')
cparser.add_argument('-exec', default='hybrid_spark', help='System-ML backend (e.g spark, spark-hybrid)', metavar='')
cparser.add_argument('-explain', help='explain plan; levels can be hops, runtime, '
'recompile_hops, recompile_runtime', nargs='?', const='runtime', metavar='')
cparser.add_argument('-debug', help='runs in debug mode', action='store_true')
cparser.add_argument('-stats', help='Monitor and report caching/recompilation statistics, '
'heavy hitter <count> is 10 unless overridden', nargs='?', const='10', metavar='')
cparser.add_argument('-gpu', help='uses CUDA instructions when reasonable, '
'set <force> option to skip conservative memory estimates '
'and use GPU wherever possible', nargs='?')
cparser.add_argument('-f', required=True, help='specifies dml/pydml file to execute; '
'path can be local/hdfs/gpfs', metavar='')
args = cparser.parse_args()
# Optional arguments
ml_options = []
if args.nvargs is not None:
ml_options.append('-nvargs')
ml_options.append(' '.join(args.nvargs))
if args.args is not None:
ml_options.append('-args')
ml_options.append(' '.join(args.args))
if args.debug is not False:
ml_options.append('-debug')
if args.explain is not None:
ml_options.append('-explain')
ml_options.append(args.explain)
if args.gpu is not None:
ml_options.append('-gpu')
ml_options.append(args.gpu)
if args.stats is not None:
ml_options.append('-stats')
ml_options.append(args.stats)
# Assign script file to name received from argparse module
script_file = args.f
# find the systemML root path which contains the bin folder, the script folder and the target folder
# tolerate path with spaces
script_dir = os.path.dirname(os.path.realpath(__file__))
project_root_dir = os.path.dirname(script_dir)
user_dir = os.getcwd()
scripts_dir = join(project_root_dir, 'scripts')
build_dir = join(project_root_dir, 'target')
lib_dir = join(build_dir, 'lib')
systemml_jar = build_dir + os.sep + "SystemML.jar"
jcuda_jars = glob.glob(lib_dir + os.sep + "jcu*.jar")
target_jars = ','.join(jcuda_jars) # Include all JCuda Jars
log4j_properties_path = join(project_root_dir, 'conf', 'log4j.properties.template')
build_err_msg = 'You must build the project before running this script.'
build_dir_err_msg = 'Could not find target directory ' + build_dir + '. ' + build_err_msg
# check if the project had been built and the jar files exist
if not (exists(build_dir)):
print(build_dir_err_msg)
sys.exit(1)
print('================================================================================')
# if the present working directory is the project root or bin folder, then use the temp folder as user.dir
if user_dir == project_root_dir or user_dir == join(project_root_dir, 'bin'):
user_dir = join(project_root_dir, 'temp')
print('Output dir: ' + user_dir)
# if the SystemML-config.xml does not exist, create it from the template
systemml_config_path = join(project_root_dir, 'conf', 'SystemML-config.xml')
systemml_template_config_path = join(project_root_dir, 'conf', 'SystemML-config.xml.template')
if not (exists(systemml_config_path)):
shutil.copyfile(systemml_template_config_path, systemml_config_path)
print('... created ' + systemml_config_path)
# if SystemML-config.xml is provided as arguments
if args.config is None:
systemml_config_path_arg = systemml_config_path
else:
systemml_config_path_arg = args.config
# from http://stackoverflow.com/questions/1724693/find-a-file-in-python
def find_file(name, path):
for root, dirs, files in os.walk(path):
if name in files:
return join(root, name)
return None
# if the script file path was omitted, try to complete the script path
if not (exists(script_file)):
script_file_name = abspath(script_file)
script_file_found = find_file(script_file, scripts_dir)
if script_file_found is None:
print('Could not find DML script: ' + script_file)
print_usage_and_exit()
else:
script_file = script_file_found
print('DML Script: ' + script_file)
default_conf = 'spark.driver.extraJavaOptions=-Dlog4j.configuration=file:{}'.format(log4j_properties_path)
# Backslash problem in windows.
if platform.system() == 'Windows':
default_conf = default_conf.replace('\\', '//')
if args.conf is not None:
conf = ' --conf '.join(args.conf + [default_conf])
else:
conf = default_conf
cmd_spark = [spark_path, '--class', 'org.apache.sysml.api.DMLScript',
'--master', args.master, '--driver-memory', args.driver_memory,
'--num-executors', args.num_executors, '--executor-memory', args.executor_memory,
'--executor-cores', args.executor_cores, '--conf', conf, '--jars', target_jars,
systemml_jar]
cmd_system_ml = ['-config', systemml_config_path_arg,
'-exec', vars(args)['exec'], '-f', script_file, ' '.join(ml_options)]
cmd = cmd_spark + cmd_system_ml
return_code = os.system(' '.join(cmd))
# For debugging
# print(' '.join(cmd))
if return_code != 0:
print('Failed to run SystemML. Exit code :' + str(return_code))
print(' '.join(cmd))
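# Example invocation (illustrative only; the script path below is hypothetical
# and the flags follow the argparse definitions above):
#
#   ./systemml-spark-submit.py -f /path/to/algorithm.dml \
#       --master yarn-client --driver-memory 5G -stats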
|
#
# Copyright (c) 2015, Scott J Maddox
#
# This file is part of Open Band Parameters Device Simulator (OBPDS).
#
# OBPDS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OBPDS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OBPDS. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
import numpy
from .layer import CompoundLayer
from .contact import Contact, OhmicContact
from .solver import (poisson_eq, poisson_zero_current,
capacitance_zero_current)
from .solution import ParametersSolution, FlatbandSolution
from .config import cfg
__all__ = ['TwoTerminalDevice']
BLUE = '#072a9f'
RED = '#d62315'
# electron charge
q = 1.602176565e-19 # C
#TODO: test and use this decorator
def _cached_method(f):
cache_name = '__%s_cache' % f.__name__
def wrapper(self, *args):
cache = getattr(self, cache_name, None)
if cache is None:
cache = {}
setattr(self, cache_name, cache)
if args in cache:
return cache[args]
res = cache[args] = f(self, *args)
return res
return wrapper
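# Illustrative sketch (an assumption; the decorator is not yet applied anywhere
# in this module, as the TODO above notes): _cached_method memoizes a method's
# return value per instance, keyed by its positional arguments, e.g.
#
#   class _Demo(object):
#       @_cached_method
#       def square(self, n):
#           return n ** 2   # evaluated once per distinct n per instance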
class TwoTerminalDevice(object):
'''
A two terminal device composed of a number of layers with two contacts
(left/top and right/bottom).
'''
def __init__(self, layers, contacts=None, Fp=None, Fn=None):
'''
Parameters
----------
layers : list of `Layer`s
layers
contacts : list of `Contact`s (default=None)
contacts; if None, defaults to two `OhmicContact`s
Fp : str or list of str's
Specifies control of the hole quasi-Fermi energy.
'left' for control by the left contact.
'right' for control by the right contact.
None for Fp = inf.
Fn : str or list of str's
Specifies control of the electron quasi-Fermi energy.
'left' for control by the left contact.
'right' for control by the right contact.
None for Fn = -inf.
'''
# Cache
self._Fp_Fn = {}
self._parameters = {}
self._flatband = {}
self._equilibrium = {}
self._zero_current = {}
self._capacitance = {}
self._layer = CompoundLayer(layers)
if contacts is None:
self._contacts = [OhmicContact(), OhmicContact()]
elif len(contacts) != 2:
raise ValueError('There must be exactly two contacts.')
else:
for contact in contacts:
if not isinstance(contact, Contact):
raise TypeError('Contacts must be instances of '
'the `Contact` class.')
self._contacts = contacts
if hasattr(Fp, '__iter__') and len(Fp) != len(layers):
raise TypeError('len(Fp) != len(layers)')
if hasattr(Fn, '__iter__') and len(Fn) != len(layers):
raise TypeError('len(Fn) != len(layers)')
self._Fp = Fp
self._Fn = Fn
def get_flatband(self, T=300.):
'''
returns x, Ev, Ec, Ei
x will be numpy.array([0, ..., thickness])
Ev will be numpy.array([VBO, ..., VBO])
Ec will be numpy.array([CBO, ..., CBO])
Ei will be numpy.array([VBO+Ei, ..., VBO+Ei])
Arguments
---------
T : float
the temperature
'''
x, Ev, Ec, Ei = self._layer.get_flatband(T)
return numpy.array(x), numpy.array(Ev), numpy.array(Ec), numpy.array(Ei)
#TODO: make this consistent with the other show_* methods.
def show_flatband(self, T=300.):
'''
Show a plot of the band profile at flatband.
Arguments
---------
T : float (default=300.)
the temperature
'''
import matplotlib.pyplot as plt
_, ax = plt.subplots()
x, Ev, Ec, Ei = self.get_flatband(T=T)
x = x*1e7 # nm
ax.plot(x, Ev, 'r-', label='$E_v$')
ax.plot(x, Ec, 'b-', label='$E_c$')
ax.plot(x, Ei, 'k:', label='$E_i$')
ax.set_ylabel('Energy (eV)')
ax.set_xlabel('Depth (nm)')
plt.show()
def _get_x(self, N):
return numpy.linspace(0, self._layer.get_thickness(), N)
def _calc_Fp_Fn(self, N):
if self._Fp is None:
Fp = [None]*N
elif not hasattr(self._Fp, '__iter__'):
Fp = [self._Fp]*N
else:
layer_xs = []
last_x = 0
for layer in self._layer:
next_x = last_x + layer.get_thickness()
layer_xs.append(next_x)
last_x = next_x
layer_xs = numpy.array(layer_xs)
Fp = []
for x in self._get_x(N):
i = numpy.searchsorted(layer_xs, x)
Fp.append(self._Fp[i])
if self._Fn is None:
Fn = [None]*N
elif not hasattr(self._Fn, '__iter__'):
Fn = [self._Fn]*N
else:
layer_xs = []
last_x = 0
for layer in self._layer:
next_x = last_x + layer.get_thickness()
layer_xs.append(next_x)
last_x = next_x
layer_xs = numpy.array(layer_xs)
Fn = []
for x in self._get_x(N):
i = numpy.searchsorted(layer_xs, x)
Fn.append(self._Fn[i])
return Fp, Fn
def _get_Fp_Fn(self, N):
if N in self._Fp_Fn:
return self._Fp_Fn[N]
else:
s = self._calc_Fp_Fn(N)
self._Fp_Fn[N] = s
return s
def _get_materials(self, N):
return [self._layer.get_material(x_i) for x_i in self._get_x(N)]
def _calc_parameters(self, T, N):
s = ParametersSolution()
s.materials = materials = self._get_materials(N)
for p in ['VBO', 'CBO_Gamma', 'CBO_L', 'CBO_X', 'CBO', 'Ei', 'ni',
'dielectric', 'Na', 'Nd', 'Nnet', 'Nc_Gamma', 'Nc_L',
'Nc_X', 'Nc', 'Nv', 'nonparabolicity', 'electron_affinity']:
value = numpy.array([getattr(m, p)(T=T) for m in materials],
dtype=float)
setattr(s, p, value)
return s
def _get_parameters(self, T, N):
if (T, N) in self._parameters:
return self._parameters[(T, N)]
else:
s = self._calc_parameters(T, N)
self._parameters[(T, N)] = s
return s
def _calc_flatband(self, T, N):
x = self._get_x(N)
p = self._get_parameters(T, N)
return FlatbandSolution(T=T, N=N, x=x,
Ev=p.VBO,
Ec_Gamma=p.CBO_Gamma,
Ec_L=p.CBO_L,
Ec_X=p.CBO_X,
Ec=p.CBO,
Ei=p.VBO+p.Ei)
def _get_flatband(self, T, N):
if (T, N) in self._flatband:
return self._flatband[(T, N)]
else:
s = self._calc_flatband(T, N)
self._flatband[(T, N)] = s
return s
def _calc_equilibrium(self, T, N, approx='kane'):
solution = poisson_eq(self, T=T, N=N, approx=approx)
self._equilibrium[(T, N, approx)] = solution
return solution
def get_thickness(self):
return self._layer.get_thickness()
def _has_equilibrium(self, T=300., N=1000, approx='kane'):
'''
        Returns True if the equilibrium solution is cached.
Arguments
---------
T : float (default=300.)
Device temperature
N : int (default=1000)
Number of grid points
approx : str (default ='kane')
If 'boltzmann', use the Boltzmann (non-degenerate) and parabolic
bands approximation (fastest). If 'parabolic', use the parabolic
bands approximation (fast). If 'kane', include Gamma-valley
non-parabolicity under the k.p Kane approximation (slow).
'''
return (T, N, approx) in self._equilibrium
def get_equilibrium(self, T=300., N=1000, approx='kane'):
'''
Returns an `EquilibriumSolution` instance.
Arguments
---------
T : float (default=300.)
Device temperature
N : int (default=1000)
Number of grid points
approx : str (default ='kane')
If 'boltzmann', use the Boltzmann (non-degenerate) and parabolic
bands approximation (fastest). If 'parabolic', use the parabolic
bands approximation (fast). If 'kane', include Gamma-valley
non-parabolicity under the k.p Kane approximation (slow).
'''
if self._has_equilibrium(T, N, approx):
return self._equilibrium[(T, N, approx)]
else:
return self._calc_equilibrium(T, N, approx)
def show_equilibrium(self, T=300., N=1000, approx='kane'):
'''
Plot and show the band profile at equilibrium.
Arguments
---------
T : float (default=300.)
Device temperature
N : int (default=1000)
Number of grid points
approx : str (default ='kane')
If 'boltzmann', use the Boltzmann (non-degenerate) and parabolic
bands approximation (fastest). If 'parabolic', use the parabolic
bands approximation (fast). If 'kane', include Gamma-valley
non-parabolicity under the k.p Kane approximation (slow).
'''
solution = self.get_equilibrium(T, N, approx)
x = solution.x*1e7 # nm
import matplotlib.pyplot as plt
plt.style.use(['ggplot'])
_, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex='col',
figsize=(10, 10),
tight_layout=cfg['plot/tight_layout'])
ax1.set_ymargin(0.05)
ax1.plot(x, solution.Ev, 'r-', label='$E_v$')
ax1.plot(x, solution.Ec, 'b-', label='$E_c$')
ax1.plot(x, solution.Ef, 'k--', label='$E_f$')
ax1.plot(x, solution.Ei, 'k:', label='$E_i$')
ax1.set_ylabel('Energy (eV)')
ax2.set_ymargin(0.05)
if (solution.Na > 0.).any():
ax2.semilogy(x, solution.Na, 'r-', label='$N_A$')
if (solution.Nd > 0.).any():
ax2.semilogy(x, solution.Nd, 'b-', label='$N_D$')
ax2.semilogy(x, solution.p, 'r--', label='$p$')
ax2.semilogy(x, solution.n, 'b--', label='$n$')
ax2.set_ylabel('Concentration (cm$^{-3}$)')
ymin, ymax = ax2.get_ylim()
if ymax/ymin > cfg['plot/semilogy/yrange']:
ax2.set_ylim(ymax/cfg['plot/semilogy/yrange'], ymax)
ax3.set_ymargin(0.05)
(ax3_field,) = ax3.plot(x, solution.field, 'k-')
(ax3_dEv_dx,) = ax3.plot(x, solution.dEv_dx, 'r-', alpha=0.5)
(ax3_dEc_dx,) = ax3.plot(x, solution.dEc_dx, 'b-', alpha=0.5)
ax3.set_ylabel('Effective Field (V/cm)')
ax3.set_xlabel('Depth (nm)')
ax3.yaxis.get_major_formatter().set_powerlimits((-3, 3))
self.filtered_autolim(ax3, solution.dEv_dx, solution.dEc_dx,
solution.field)
plt.show()
def _save_solution(self, s, path):
names = [name for name in dir(s) if not name.startswith('_')]
excludes = ['V', 'N', 'T', 'materials']
for exclude in excludes:
if exclude in names:
names.remove(exclude)
arrays = [getattr(s, name) for name in names]
if not names:
return
header = '\t'.join(names)+'\n'
template = '\t'.join(['{}' for name in names])+'\n'
with open(path, 'w') as f:
f.write(header)
for i in xrange(arrays[0].size):
values = [repr(array[i]) for array in arrays]
f.write(template.format(*values))
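    # The writer above produces a plain tab-separated text file: one header row with
    # the solution's public attribute names (alphabetical, since they come from
    # dir(), minus the excluded scalars), then one row of repr()'d values per grid
    # point, e.g. (illustrative fragment only):
    #
    #     Ec\tEv\tx\t...
    #     1.42\t0.0\t0.0\t...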
def save_equilibrium(self, path, show=False, T=300, N=1000, approx='kane'):
'''
Save the bands at equilibrium.
Arguments
---------
path : string
the file path
show : bool
shows the bands if True
T : float (default=300.)
Device temperature
N : int (default=1000)
Number of grid points
approx : str (default ='kane')
If 'boltzmann', use the Boltzmann (non-degenerate) and parabolic
bands approximation (fastest). If 'parabolic', use the parabolic
bands approximation (fast). If 'kane', include Gamma-valley
non-parabolicity under the k.p Kane approximation (slow).
'''
if show:
self.show_equilibrium(T, N, approx)
s = self.get_equilibrium(T, N, approx)
self._save_solution(s, path)
def _calc_zero_current(self, V, T, N, approx):
return poisson_zero_current(self, V=V, T=T, N=N, approx=approx)
def has_zero_current(self, V, T=300., N=1000, approx='kane'):
'''
Returns True if the zero current solution is cached.
Arguments
---------
V : float
Bias voltage, i.e. left/top contact bias - right/bottom contact bias
T : float (default=300.)
Device temperature
N : int (default=1000)
Number of grid points
approx : str (default ='kane')
If 'boltzmann', use the Boltzmann (non-degenerate) and parabolic
bands approximation (fastest). If 'parabolic', use the parabolic
bands approximation (fast). If 'kane', include Gamma-valley
non-parabolicity under the k.p Kane approximation (slow).
'''
return (V, T, N, approx) in self._zero_current
def get_zero_current(self, V, T=300., N=1000, approx='kane'):
'''
Returns a `ZeroCurrentSolution` instance.
Arguments
---------
V : float
Bias voltage, i.e. left/top contact bias - right/bottom contact bias
T : float (default=300.)
Device temperature
N : int (default=1000)
Number of grid points
approx : str (default ='kane')
If 'boltzmann', use the Boltzmann (non-degenerate) and parabolic
bands approximation (fastest). If 'parabolic', use the parabolic
bands approximation (fast). If 'kane', include Gamma-valley
non-parabolicity under the k.p Kane approximation (slow).
'''
if self.has_zero_current(V, T, N, approx):
return self._zero_current[(V, T, N, approx)]
else:
solution = self._calc_zero_current(V, T, N, approx)
self._zero_current[(V, T, N, approx)] = solution
return solution
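    # Usage sketch (hypothetical `device` instance): sweep the zero-current solution
    # over bias; repeated calls at the same (V, T, N, approx) are served from the
    # cache.
    #
    #     for V in (0.0, -0.5, -1.0):
    #         s = device.get_zero_current(V)
    #         print(V, s.Ec[0] - s.Ec[-1])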
def show_zero_current(self, V, T=300., N=1000, approx='kane'):
'''
Plot and show the band profile at a given bias voltage under the
zero-current approximation.
Arguments
---------
V : float
Bias voltage, i.e. left/top contact bias - right/bottom contact bias
T : float (default=300.)
Device temperature
N : int (default=1000)
Number of grid points
approx : str (default ='kane')
If 'boltzmann', use the Boltzmann (non-degenerate) and parabolic
bands approximation (fastest). If 'parabolic', use the parabolic
bands approximation (fast). If 'kane', include Gamma-valley
non-parabolicity under the k.p Kane approximation (slow).
'''
self._zero_current_image(V, path=None, show=True,
                                 T=T, N=N, approx=approx)
def save_zero_current_image(self, path, V, show=False,
T=300., N=1000, approx='kane'):
self._zero_current_image(V=V, path=path, show=show,
                                 T=T, N=N, approx=approx)
def _zero_current_image(self, V, path=None, show=False,
T=300., N=1000, approx='kane'):
solution = self.get_zero_current(V, T, N, approx)
x = solution.x*1e7 # nm
import matplotlib.pyplot as plt
plt.style.use(['ggplot'])
_, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex='col',
figsize=(10, 10),
tight_layout=cfg['plot/tight_layout'])
ax1.set_ymargin(0.05)
ax2.set_ymargin(0.05)
ax1.plot(x, solution.Ev, 'r-', label='$E_v$')
ax1.plot(x, solution.Ec, 'b-', label='$E_c$')
ax1.plot(x, solution.Fp, 'r--', label='$F_p$')
ax1.plot(x, solution.Fn, 'b--', label='$F_n$')
ax1.plot(x, solution.Ei, 'k:', label='$E_i$')
ax1.set_ylabel('Energy (eV)')
ax2.set_ymargin(0.05)
if (solution.Na > 0.).any():
ax2.semilogy(x, solution.Na, 'r-', label='$N_A$')
if (solution.Nd > 0.).any():
ax2.semilogy(x, solution.Nd, 'b-', label='$N_D$')
ax2.semilogy(x, solution.p, 'r--', label='$p$')
ax2.semilogy(x, solution.n, 'b--', label='$n$')
ax2.set_ylabel('Concentration (cm$^{-3}$)')
ymin, ymax = ax2.get_ylim()
if ymax/ymin > cfg['plot/semilogy/yrange']:
ax2.set_ylim(ymax/cfg['plot/semilogy/yrange'], ymax)
ax3.set_ymargin(0.05)
dEv_dx = numpy.empty_like(solution.dEv_dx)
dEc_dx = numpy.empty_like(solution.dEc_dx)
(ax3_field,) = ax3.plot(x, solution.field, 'k-')
(ax3_dEv_dx,) = ax3.plot(x, solution.dEv_dx, 'r-', alpha=0.5)
(ax3_dEc_dx,) = ax3.plot(x, solution.dEc_dx, 'b-', alpha=0.5)
ax3.axhline(0, color='grey')
ax3.set_ylabel('Effective Field (V/cm)')
ax3.set_xlabel('Depth (nm)')
ax3.yaxis.get_major_formatter().set_powerlimits((-3, 3))
self.filtered_autolim(ax3, solution.dEv_dx, solution.dEc_dx,
solution.field)
if path is not None:
plt.savefig(path)
if show:
plt.show()
def interactive_zero_current(self, T=300., N=1000, approx='kane'):
'''
Arguments
---------
T : float (default=300.)
Device temperature
N : int (default=1000)
Number of grid points
approx : str (default ='kane')
If 'boltzmann', use the Boltzmann (non-degenerate) and parabolic
bands approximation (fastest). If 'parabolic', use the parabolic
bands approximation (fast). If 'kane', include Gamma-valley
non-parabolicity under the k.p Kane approximation (slow).
'''
solution = self.get_zero_current(0., T, N, approx)
x = solution.x*1e7 # nm
import matplotlib.pyplot as plt
plt.style.use(['ggplot'])
fig = plt.figure(figsize=(10, 10),
#facecolor='white', edgecolor='white',
tight_layout=cfg['plot/tight_layout'])
ax3 = plt.subplot2grid(shape=(3,15), loc=(2,0), colspan=14)
ax2 = plt.subplot2grid(shape=(3,15), loc=(1,0), colspan=14, sharex=ax3)
ax1 = plt.subplot2grid(shape=(3,15), loc=(0,0), colspan=14, sharex=ax3)
ax4 = plt.subplot2grid(shape=(3,15), loc=(0,14), sharey=ax1)
ax4.get_xaxis().set_visible(False)
ax4.get_yaxis().set_visible(False)
ax4.axhline(0, color='grey')
ax4_V = ax4.axhline(y=0., color='k', linestyle='-')
ax1.set_ymargin(0.1)
ax2.set_ymargin(0.1)
ax3.set_ymargin(0.1)
(ax1_Ev,) = ax1.plot(x, solution.Ev, 'r-', label='$E_v$')
(ax1_Ec,) = ax1.plot(x, solution.Ec, 'b-', label='$E_c$')
(ax1_Fp,) = ax1.plot(x, solution.Fp, 'r--', label='$F_p$')
(ax1_Fn,) = ax1.plot(x, solution.Fn, 'b--', label='$F_n$')
(ax1_Ei,) = ax1.plot(x, solution.Ei, 'k:', label='$E_i$')
ax1.set_ylabel('Energy (eV)')
nans = numpy.empty_like(x)
nans.fill(numpy.nan)
(ax2_Na,) = ax2.plot(x, solution.Na, 'r-', label='$N_A$')
(ax2_Nd,) = ax2.plot(x, solution.Nd, 'b-', label='$N_D$')
(ax2_p,) = ax2.semilogy(x, solution.p, 'r--', label='$p$')
(ax2_n,) = ax2.semilogy(x, solution.n, 'b--', label='$n$')
ax2.set_ylabel('Concentration (cm$^{-3}$)')
ymin, ymax = ax2.get_ylim()
if ymax/ymin > cfg['plot/semilogy/yrange']:
ax2.set_ylim(ymax/cfg['plot/semilogy/yrange'], ymax)
(ax3_field,) = ax3.plot(x, solution.field, 'k-')
(ax3_dEv_dx,) = ax3.plot(x, solution.dEv_dx, color=RED, alpha=0.7)
(ax3_dEc_dx,) = ax3.plot(x, solution.dEc_dx, color=BLUE, alpha=0.7)
ax3.axhline(0, color='grey')
ax3.set_ylabel('Effective Field (V/cm)')
ax3.set_xlabel('Depth (nm)')
ax3.yaxis.get_major_formatter().set_powerlimits((-3, 3))
self.filtered_autolim(ax3, solution.dEv_dx, solution.dEc_dx,
solution.field)
# new_ymin = solution.field.min()
# new_ymax = solution.field.max()
# if ax3._ymargin > 0:
# delta = (new_ymax - new_ymin) * ax3._ymargin
# new_ymin -= delta
# new_ymax += delta
# ax3.set_ybound(new_ymin, new_ymax)
def onclick(event):
if event.inaxes != ax4:
return
V = event.ydata
ax4_V.set_data(([0, 1], [V, V]))
solution = self.get_zero_current(V, T, N, approx)
ax1_Ev.set_data(x, solution.Ev)
ax1_Ec.set_data(x, solution.Ec)
ax1_Fp.set_data(x, solution.Fp)
ax1_Fn.set_data(x, solution.Fn)
ax1_Ei.set_data(x, solution.Ei)
# if (solution.Na > 0.).any():
# ax2_Na.set_data(x, solution.Na)
# else:
# ax2_Na.set_data(x, nans)
# if (solution.Nd > 0.).any():
# ax2_Nd.set_data(x, solution.Nd)
# else:
# ax2_Nd.set_data(x, solution.Nd)
ax2_p.set_data(x, solution.p)
ax2_n.set_data(x, solution.n)
ax3_dEv_dx.set_data(x, solution.dEv_dx)
ax3_dEc_dx.set_data(x, solution.dEc_dx)
ax3_field.set_data(x, solution.field)
for ax in [ax1, ax2]:
old_ymin, old_ymax = ax.get_ylim()
ax.relim()
new_ymin, new_ymax = ax.dataLim.intervaly
if ax._ymargin > 0:
delta = (new_ymax - new_ymin) * ax._ymargin
new_ymin -= delta
new_ymax += delta
ax.set_ybound(min(old_ymin, new_ymin), max(old_ymax, new_ymax))
ymin, ymax = ax2.get_ylim()
if ymax/ymin > cfg['plot/semilogy/yrange']:
ax2.set_ybound(ymax/cfg['plot/semilogy/yrange'], ymax)
self.filtered_autolim(ax3, solution.dEv_dx, solution.dEc_dx,
solution.field)
fig.canvas.draw()
_cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
@classmethod
def filtered_autolim(cls, ax, *fields):
threshold1 = 10
threshold2 = 2
fmin = numpy.inf
fmax = -numpy.inf
for field in fields:
for i in xrange(2, field.size-2):
delta_m2 = abs(field[i]-field[i-2])
delta_m1 = abs(field[i]-field[i-1])
delta_p1 = abs(field[i]-field[i+1])
delta_p2 = abs(field[i]-field[i+2])
# if (delta_m1 ~= delta_m2 and
# delta_p1 << delta_m1 and
# delta_p2 >> delta_p1):
if (delta_m1 < delta_m2*threshold2 and
delta_p1*threshold1 < delta_m1 and
delta_p2 > delta_p1*threshold1):
continue
# if (delta_m1 << delta_m2 and
# delta_p1 >> delta_m1 and
# delta_p2 ~= delta_p1):
if (delta_m1*threshold1 < delta_m2 and
delta_p1 > delta_m1*threshold1 and
delta_p2 < delta_p1*threshold2):
continue
fmin = min(fmin, field[i])
fmax = max(fmax, field[i])
if ax._ymargin > 0:
delta = (fmax - fmin) * ax._ymargin
fmin -= delta
fmax += delta
ax.set_ybound(fmin, fmax)
def save_zero_current(self, V, path, show=False, T=300, N=1000,
approx='kane'):
'''
Save the band profile data and image at a given bias voltage under the
zero-current approximation.
Arguments
---------
V : float
Bias voltage, i.e. left/top contact bias - right/bottom contact bias
path : string
            the file path (excluding the file extension)
show : bool
shows the bands if True
T : float (default=300.)
Device temperature
N : int (default=1000)
Number of grid points
approx : str (default ='kane')
If 'boltzmann', use the Boltzmann (non-degenerate) and parabolic
bands approximation (fastest). If 'parabolic', use the parabolic
bands approximation (fast). If 'kane', include Gamma-valley
non-parabolicity under the k.p Kane approximation (slow).
'''
s = self.get_zero_current(V, T, N, approx)
self._save_solution(s, path=path+'.txt')
self._zero_current_image(V=V, path=path+'.png', show=show,
T=T, N=N, approx=approx)
def _calc_capacitance(self, V, dV, T=300, N=1000, approx='kane'):
return capacitance_zero_current(self, V, dV, T, N, approx)
def get_capacitance(self, V, dV=1e-3, T=300, N=1000, approx='kane'):
'''
Returns
-------
C : float
capacitance in units of F/cm**2
'''
if (V, dV, T, N, approx) in self._capacitance:
return self._capacitance[(V, dV, T, N, approx)]
else:
s = self._calc_capacitance(V, dV, T, N, approx)
self._capacitance[(V, dV, T, N, approx)] = s
return s
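    # Usage sketch (hypothetical `device` instance): one small-signal capacitance
    # point at -1 V with the default 1 mV bias step. The calculation is delegated to
    # `capacitance_zero_current`, which, judging from its signature, differences the
    # zero-current solution over the bias step dV.
    #
    #     C = device.get_capacitance(V=-1.0, dV=1e-3)   # F/cm**2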
def get_cv(self, Vstart, Vstop, Vnum=100, dV=1e-3, T=300, N=1000,
approx='kane'):
'''
Returns
-------
C : ndarray
capacitance in units of F/cm**2
V : ndarray
bias voltage in units of V
'''
V = numpy.linspace(Vstart, Vstop, Vnum)
C = numpy.empty(Vnum)
for i in xrange(Vnum):
C[i] = self.get_capacitance(V[i], dV, T, N, approx)
return C, V
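    # Usage sketch (hypothetical `device` instance): compute a C-V sweep and the
    # same derived quantities that show_cv/save_cv plot below.
    #
    #     C, V = device.get_cv(Vstart=0.0, Vstop=-2.0, Vnum=50)
    #     rCs = 1/C**2                                    # 1/C^2
    #     ndV_drCs = -1.0/numpy.gradient(rCs, V[1]-V[0])  # -dV/d(1/C^2)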
def show_cv(self, Vstart, Vstop, Vnum=50, dV=1e-3, T=300, N=1000,
approx='kane'):
C, V = self.get_cv(Vstart, Vstop, Vnum, dV, T, N, approx)
rCs = 1/C**2
ndV_drCs = (-1.)/numpy.gradient(rCs, (V[1]-V[0]))
import matplotlib.pyplot as plt
plt.style.use(['ggplot'])
_, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex='col',
figsize=(10, 10),
tight_layout=cfg['plot/tight_layout'])
ax1.set_ymargin(0.05)
ax1.plot(V, C, 'r-')
# ax1.axhline(0, color='grey')
ax1.set_ylabel('Capacitance (F/cm$^2$)')
ax2.set_ymargin(0.05)
ax2.plot(V, rCs, 'r-')
ax2.set_ylabel('1/C$^2$ (cm$^4$/F$^2$)')
ax3.set_ymargin(0.05)
try:
ax3.semilogy(V, ndV_drCs, 'r-')
        except Exception:
ax3.set_yscale('linear')
# ax3.plot(V, dV/numpy.gradient(1/C**2), 'r-')
ax3.set_ylabel('-dV/d(1/C$^2$)')
ax3.set_xlabel('Bias (V)')
plt.show()
def save_cv(self, path, Vstart, Vstop, Vnum=50, dV=1e-3, show=False,
T=300, N=1000, approx='kane'):
C, V = self.get_cv(Vstart, Vstop, Vnum, dV, T, N, approx)
rCs = 1/C**2
ndV_drCs = (-1.)/numpy.gradient(rCs, (V[1]-V[0]))
if show:
import matplotlib.pyplot as plt
plt.style.use(['ggplot'])
_, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex='col',
figsize=(10, 10),
tight_layout=cfg['plot/tight_layout'])
ax1.set_ymargin(0.05)
ax1.plot(V, C, 'r-')
# ax1.axhline(0, color='grey')
ax1.set_ylabel('Capacitance (F/cm$^2$)')
ax2.set_ymargin(0.05)
ax2.plot(V, rCs, 'r-')
ax2.set_ylabel('1/C$^2$ (cm$^4$/F$^2$)')
ax3.set_ymargin(0.05)
try:
ax3.semilogy(V, ndV_drCs, 'r-')
            except Exception:
ax3.set_yscale('linear')
# ax3.plot(V, dV/numpy.gradient(1/C**2), 'r-')
ax3.set_ylabel('-dV/d(1/C$^2$)')
ax3.set_xlabel('Bias (V)')
plt.show()
header = 'V\tC/A\t1/C^2\tdV/d(1/C^2)\n'
template = '{V}\t{C}\t{rCs}\t{ndV_drCs}\n'
with open(path, 'w') as f:
f.write(header)
for i in xrange(V.size):
f.write(template.format(V=V[i], C=C[i],
rCs=rCs[i], ndV_drCs=ndV_drCs[i]))
|
import numpy as np
def fix_mask(data, mask):
"""
Parameters
----------
data : numpy.ndarray or numpy.ma.MaskedArray
Astronomical data cube.
mask : numpy.ndarray
        Boolean mask that will be applied to the data.
Returns
-------
result : numpy.ma.MaskedArray
Masked astronomical data cube.
"""
ismasked = isinstance(data, np.ma.MaskedArray)
if ismasked and mask is None:
return data
else:
return np.ma.MaskedArray(data, mask)
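# Usage sketch (illustrative arrays only): apply a boolean mask to a plain ndarray,
# or pass an already-masked cube straight through when no mask is given.
#
#     cube = np.random.rand(4, 4)
#     masked = fix_mask(cube, cube > 0.5)              # numpy.ma.MaskedArray
#     same = fix_mask(np.ma.MaskedArray(cube), None)   # returned unchanged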
def fix_limits(data, vect):
"""
Fix vect index to be inside data
Parameters
----------
data : numpy.ndarray or numpy.ma.MaskedArray
Astronomical data cube.
vect : tuple, list or numpy.ndarray
Array with the indexes to be fixed.
Returns
-------
result : numpy.ndarray
Fixed array of indexes.
"""
if isinstance(vect, (tuple, list)):
vect = np.array(vect)
vect = vect.astype(int)
low = vect < 0
up = vect > data.shape
    if low.any():
vect[low] = 0
    if up.any():
vect[up] = np.array(data.shape)[up]
return vect
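# Usage sketch: out-of-range indexes are clipped to the cube's bounds.
#
#     cube = np.zeros((10, 20, 30))
#     fix_limits(cube, (-5, 10, 40))   # -> array([ 0, 10, 30])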
def slab(data, lower=None, upper=None):
"""
Obtain the n-dimensional slab from lower to upper (i.e. slab is a vector of slices)
Parameters
----------
data : numpy.ndarray
        Astronomical data cube.
lower : 3-tuple (default=None)
Lower coordinates for the subcube.
upper : 3-tuple (default=None)
Upper coordinates for the subcube.
Returns
-------
result : list
list of slices using lower and upper coordinates to create a subcube.
"""
if lower is None:
lower = np.zeros(data.ndim)
if upper is None:
upper = data.shape
lower = fix_limits(data, lower)
upper = fix_limits(data, upper)
m_slab = []
for i in range(data.ndim):
m_slab.append(slice(lower[i], upper[i]))
return m_slab
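# Usage sketch: the returned list of slices can be used directly to index a subcube.
#
#     cube = np.zeros((10, 20, 30))
#     sub = cube[tuple(slab(cube, (2, 2, 2), (5, 5, 5)))]   # shape (3, 3, 3)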
def matching_slabs(data, flux, lower, upper):
"""
Obtain the matching subcube inside the lower and upper points.
    Parameters
    ----------
data : numpy.ndarray
First data cube
flux : numpy.ndarray
        Second data cube.
lower : tuple
Lower coordinates for the subcube.
upper : tuple
Upper coordinates for the subcube.
Returns
-------
The subcube inside the lower and upper points that matches both data cube dimensions.
"""
data_slab = slab(data, lower, upper)
flow = np.zeros(flux.ndim)
fup = np.array(flux.shape)
for i in range(data.ndim):
if data_slab[i].start == 0:
flow[i] = flux.shape[i] - data_slab[i].stop
if data_slab[i].stop == data.shape[i]:
fup[i] = data_slab[i].stop - data_slab[i].start
flux_slab = slab(flux, flow, fup)
return data_slab, flux_slab
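# Usage sketch: when a flux stamp hangs over the edge of a larger cube, the two
# returned slabs select matching-size regions of each array.
#
#     data = np.zeros((100, 100, 100))
#     flux = np.ones((10, 10, 10))
#     d_slab, f_slab = matching_slabs(data, flux, (95, 95, 95), (105, 105, 105))
#     data[tuple(d_slab)] += flux[tuple(f_slab)]   # both regions are (5, 5, 5)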
|
"""
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
from ..cli import prt
def ckan_parser(cmd):
lib_p = cmd.add_parser('ckan', help='Access a CKAN repository')
lib_p.set_defaults(command='ckan')
lib_p.add_argument('-n','--name', default='default', help='Select the configuration name for the repository')
asp = lib_p.add_subparsers(title='CKAN commands', help='Access a CKAN repository')
sp = asp.add_parser('package', help='Dump a package by name, as json or yaml')
sp.set_defaults(subcommand='package')
sp.add_argument('term', type=str,help='Query term')
group = sp.add_mutually_exclusive_group()
group.add_argument('-y', '--yaml', default=True, dest='use_json', action='store_false')
group.add_argument('-j', '--json', default=True, dest='use_json', action='store_true')
def ckan_command(args,rc):
from ambry.dbexceptions import ConfigurationError
import ambry.client.ckan
import requests
repo_name = args.name
repo_config = rc.datarepo(repo_name)
    api = ambry.client.ckan.Ckan(repo_config.url, repo_config.key)
if args.subcommand == 'package':
try:
pkg = api.get_package(args.term)
except requests.exceptions.HTTPError:
return
if args.use_json:
import json
print(json.dumps(pkg, sort_keys=True, indent=4, separators=(',', ': ')))
else:
import yaml
            print(yaml.dump(pkg, indent=4, default_flow_style=False))
else:
pass
|
# -*- coding: utf-8 -*-
# Copyright Tom SF Haines
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from panda3d.core import *
from direct.actor import Actor
class Loading:
"""Does a loading screen - renders some stuff whilst a transition is happenning."""
def __init__(self, xml):
self.node = Actor.Actor('data/misc/loading')
self.node.reparentTo(base.render)
self.node.setShaderAuto()
self.node.hide()
self.light = PointLight('plight')
self.light.setColor(VBase4(1.0, 1.0, 1.0, 1.0))
self.lightNode = self.node.attachNewNode(self.light)
self.lightNode.setPos(0.0, 0.0, 1.5)
self.node.setLight(self.lightNode)
self.task = None
#self.stop()
def reload(self, xml):
pass
def start(self):
self.node.hide()
self.node.stop()
if self.task != None:
taskMgr.remove(self.task)
self.task = None
def stop(self):
self.node.show()
self.node.loop('slide')
self.task = taskMgr.add(self.camPos, 'LoadingCamera')
def destroy(self):
pass
def camPos(self, task):
base.camera.setPos(0.0, 0.0, 20.0)
base.camera.lookAt(0.0, 0.0, 0.0)
return task.cont
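# Usage sketch (assumptions: this runs inside a Panda3D ShowBase application, which
# provides the `base` and `taskMgr` builtins used above, and the 'data/misc/loading'
# model is available):
#
#     loading = Loading(None)   # the xml argument is currently unused
#     loading.stop()            # as written, stop() shows the screen and starts the camera task
#     # ... perform the transition ...
#     loading.start()           # as written, start() hides the screen again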
|
import sys
sys.path.append('/Users/natj/projects/arcmancer/lib/')
import pyarcmancer as pyac
from img import Imgplane
from visualize_polar import Visualize
from lineprofile import *
import units
import numpy as np
import matplotlib as mpl
from pylab import *
import os
from matplotlib import cm
import scipy.interpolate as interp
#from joblib import Parallel, delayed
#import multiprocessing
outdir = 'out/lines2/'
##################################################
# Set up figure & layout
fig = figure(figsize=(6,10))
mpl.rc('font', family='serif')
mpl.rc('xtick', labelsize='x-small')
mpl.rc('ytick', labelsize='x-small')
mpl.rcParams['image.cmap'] = 'inferno'
#num_cores = multiprocessing.cpu_count()
#print "num of cores {}", num_cores
#Setup pyarcmancer
##################################################
conf = pyac.Configuration()
conf.absolute_tolerance = 1e-12
conf.relative_tolerance = 1e-12
conf.henon_tolerance = 1e-8
conf.sampling_interval = 1e-3
conf.minimum_stepsize = 1e-10
conf.maximum_steps = 10000
conf.enforce_maximum_stepsize = False
conf.enforce_minimum_stepsize = True
conf.enforce_maximum_steps = True
conf.store_only_endpoints = True
#pyac.Log.set_console()
pyac.Log.set_file()
##################################################
# Star parameters
#R = 12.0
#M = 1.6
freq = 700.0
#incl = 15.0
#for M in [1.5, 1.1, 1.8]:
for M in [1.4]:
print "##################################################"
print "M = ", M
for R in [10.0]:
print "##################################################"
print " R = ", R
#for incl in [90, 80, 70, 60, 50, 40, 30, 20, 15, 10, 5, 1]:
#for incl in [9, 8, 7, 6, 4, 3, 2, 0.5]:
for incl in [20.0]:
print "##################################################"
print " i = ",incl
fname = 'neutronstar_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.png'.format( np.int(freq), np.int(R), M, np.int(incl))
if os.path.isfile( outdir+fname ):
continue
# Variables in units of solar mass are derived here
# and typically presented with full name
mass = M
radius = R * units.solar_mass_per_km / mass
angvel = freq * 2.0*np.pi / units.solar_mass_per_s * mass
imgscale = (mass/units.solar_mass_per_km*1.0e5)**2 #cm^2/Msun
compactness = np.sqrt(1 - 2/radius) #isotropic radius compactness
conf.absolute_tolerance = 1e-12 * radius
conf.minimum_stepsize = 1e-10 * radius
##################################################
#Define metric and surfaces of the spacetime
#S+D
metric = pyac.AGMMetric(radius, 1.0, angvel, pyac.AGMMetric.MetricType.agm_no_quadrupole)
ns_surface = pyac.AGMSurface(radius, 1.0, angvel, pyac.AGMSurface.SurfaceType.spherical)
#Oblate Sch #WORKS
#metric = pyac.AGMMetric(radius, 1.0, angvel, pyac.AGMMetric.MetricType.agm_no_quadrupole)
#ns_surface = pyac.AGMSurface(radius, 1.0, angvel, pyac.AGMSurface.SurfaceType.agm_no_quadrupole)
#Full AGM + oblate
#metric = pyac.AGMMetric(radius, 1.0, angvel, pyac.AGMMetric.MetricType.agm_standard)
#ns_surface = pyac.AGMSurface(radius, 1.0, angvel, pyac.AGMSurface.SurfaceType.agm)
surfaces = [ ns_surface ]
# Build and configure image plane by hand
img = Imgplane(conf, metric, surfaces)
img.verbose = 1
img.incl = np.deg2rad(incl) #set inclination
img.distance = 100000.0*mass #set distance
#Locate star edges
img.find_boundaries(Nedge=50, reltol=1.0e-4, max_iterations=30)
#Build internal coarse grid for the interpolation routines
img.generate_internal_grid(Nrad = 80, Nchi = 50 )
img.dissect_geos()
#Construct output xy image plane from img object
##################################################
ion()
visz = Visualize()
visz.gs.update(hspace = 0.5)
visz.compactness = compactness
visz.plot(img)
#prepare line profile axis object
visz.axs[6] = subplot( visz.gs[3,:] )
visz.axs[6].minorticks_on()
visz.axs[6].set_xlabel(r'Energy')
visz.axs[6].set_ylabel(r'Flux')
#Construct image
#visz.star(img, spot)
#visz.polar(img, spot)
visz.dissect(img)
visz.star_plot(0.0)
visz.polar_dissect(img)
visz.polar_plot(0.0)
##################################################
# Compute line profile
es, yy2 = lineprofile(visz.redshift**4, visz.redshift)
            dE = max(np.abs(es[0] - compactness), np.abs(compactness - es[-1]))
##################################################
#Save redshift into a file
fname = 'reds_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.csv'.format(
np.int(freq),
np.int(R),
M,
np.int(incl),
)
print 'Saving to a file: '+fname
np.savetxt(outdir+fname,
visz.redshift.flatten(),
delimiter=',',
fmt = '%10.9e'
)
#Save thetas into a file
fname = 'thetas_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.csv'.format(
np.int(freq),
np.int(R),
M,
np.int(incl),
)
print 'Saving to a file: '+fname
np.savetxt(outdir+fname,
visz.thetas.flatten(),
delimiter=',',
fmt = '%10.9e'
)
#Save phi into a file
fname = 'phis_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.csv'.format(
np.int(freq),
np.int(R),
M,
np.int(incl),
)
print 'Saving to a file: '+fname
np.savetxt(outdir+fname,
visz.phis.flatten(),
delimiter=',',
fmt = '%10.9e'
)
#redshift limits
vmin = compactness - dE
vmax = compactness + dE
# Line profile
##################################################
#ax = subplot(gs[2,2])
#ax.set_xlim(0.8, 1.2)
visz.axs[6].plot(es, yy2, "b-")
pause(1.0)
fname = 'neutronstar_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.png'.format(
np.int(freq),
np.int(R),
M,
np.int(incl),
)
savefig(outdir+fname)
#save lineprofile
##################################################
#Finally save to file
fname = 'lineprofile_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.csv'.format(
np.int(freq),
np.int(R),
M,
np.int(incl),
)
print 'Saving to a file: '+fname
np.savetxt(outdir+fname,
np.vstack((es, yy2)).T,
delimiter=',',
fmt = '%10.9e',
header='Energy, pdf'
)
|
from tkinter import *
import goslate
import sys
#--------------------------------------------------------------
#Tkinter windows creation:
root = Tk()
root.geometry("500x300")
root.title("TransPy - Will Venn")
#--------------------------------------------------------------
#Functions:
def Translate_GB():
s = Entry_1.get()
gs = goslate.Goslate()
translated = gs.translate(s, "gb")
print (translated)
def Translate_FR():
s = Entry_1.get()
gs = goslate.Goslate()
translated = gs.translate(s, "fr")
print (translated)
def Translate_ES():
s = Entry_1.get()
gs = goslate.Goslate()
translated = gs.translate(s, "es")
print (translated)
def Translate_SV():
s = Entry_1.get()
gs = goslate.Goslate()
translated = gs.translate(s, "sv")
print (translated)
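# A more compact alternative (sketch only, not wired to the buttons below): a single
# parameterised helper avoids repeating the goslate boilerplate per language.
#
#     def translate_to(lang_code):
#         gs = goslate.Goslate()
#         print(gs.translate(Entry_1.get(), lang_code))
#
#     # e.g. Button(root, text="Translate to FR", bd=3, command=lambda: translate_to("fr"))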
#--------------------------------------------------------------
#Labels:
Label_Intro = Label(root, text="Enter the text you would like to translate below")
Label_SupportedLang = Label(root, text="The supported languages are: ")
Label_ENG = Label(root, text="GB = English")
Label_FR = Label(root, text="FR = French")
Label_ES = Label(root, text="ES = Spanish")
Label_SV = Label(root, text="SV = Swedish")
#--------------------------------------------------------------
#Buttons:
Button_Submit_GB = Button(root, text="Translate to GB", bd=3, command=Translate_GB)
Button_Submit_FR = Button(root, text="Translate to FR", bd=3, command=Translate_FR)
Button_Submit_ES = Button(root, text="Translate to ES", bd=3, command=Translate_ES)
Button_Submit_SV = Button(root, text="Translate to SV", bd=3, command=Translate_SV)
#--------------------------------------------------------------
#Entry box:
Entry_1 = Entry(root)
Entry_1.delete(0, END)
Entry_1.insert(0, "Translate here: ")
##s = Entry_1.get()
#--------------------------------------------------------------
#Packings:
Label_Intro.pack()
Label_SupportedLang.pack()
Label_ENG.pack()
Label_FR.pack()
Label_ES.pack()
Label_SV.pack()
Entry_1.pack()
Button_Submit_GB.pack()
Button_Submit_FR.pack()
Button_Submit_ES.pack()
Button_Submit_SV.pack()
#--------------------------------------------------------------
#--------------------------------------------------------------
#Loops:
root.mainloop()
#--------------------------------------------------------------
|
from hl7apy.utils import iteritems
DATATYPES = {
'AD_1': ['leaf', None, 'ST', 'STREET_ADDRESS_ST', None, -1],
'AD_2': ['leaf', None, 'ST', 'OTHER_DESIGNATION', None, -1],
'AD_3': ['leaf', None, 'ST', 'CITY', None, -1],
'AD_4': ['leaf', None, 'ST', 'STATE_OR_PROVINCE', None, -1],
'AD_5': ['leaf', None, 'ST', 'ZIP_OR_POSTAL_CODE', None, -1],
'AD_6': ['leaf', None, 'ID', 'COUNTRY', None, -1],
'AD_7': ['leaf', None, 'ID', 'ADDRESS_TYPE', None, -1],
'AD_8': ['leaf', None, 'ST', 'OTHER_GEOGRAPHIC_DESIGNATION', None, -1],
'AUI_1': ['leaf', None, 'ST', 'AUTHORIZATION_NUMBER', None, -1],
'AUI_2': ['leaf', None, 'DT', 'DATE', None, -1],
'AUI_3': ['leaf', None, 'ST', 'SOURCE', None, -1],
'CCD_1': ['leaf', None, 'ID', 'WHEN_TO_CHARGE_CODE', None, -1],
'CCD_2': ['sequence', None, 'TS', 'DATE_TIME', None, -1],
'CCP_1': ['leaf', None, 'NM', 'CHANNEL_CALIBRATION_SENSITIVITY_CORRECTION_FACTOR', None, -1],
'CCP_2': ['leaf', None, 'NM', 'CHANNEL_CALIBRATION_BASELINE', None, -1],
'CCP_3': ['leaf', None, 'NM', 'CHANNEL_CALIBRATION_TIME_SKEW', None, -1],
'CD_1': ['sequence', None, 'WVI', 'CHANNEL_IDENTIFIER', None, -1],
'CD_2': ['sequence', None, 'WVS', 'WAVEFORM_SOURCE', None, -1],
'CD_3': ['sequence', None, 'CSU', 'CHANNEL_SENSITIVITY_UNITS', None, -1],
'CD_4': ['sequence', None, 'CCP', 'CHANNEL_CALIBRATION_PARAMETERS', None, -1],
'CD_5': ['leaf', None, 'NM', 'CHANNEL_SAMPLING_FREQUENCY', None, -1],
'CD_6': ['sequence', None, 'NR', 'MINIMUM_MAXIMUM_DATA_VALUES', None, -1],
'CE_1': ['leaf', None, 'ST', 'IDENTIFIER_ST', None, -1],
'CE_2': ['leaf', None, 'ST', 'TEXT', None, -1],
'CE_3': ['leaf', None, 'IS', 'NAME_OF_CODING_SYSTEM', None, -1],
'CE_4': ['leaf', None, 'ST', 'ALTERNATE_IDENTIFIER_ST', None, -1],
'CE_5': ['leaf', None, 'ST', 'ALTERNATE_TEXT', None, -1],
'CE_6': ['leaf', None, 'IS', 'NAME_OF_ALTERNATE_CODING_SYSTEM', None, -1],
'CF_1': ['leaf', None, 'ID', 'IDENTIFIER_ID', None, -1],
'CF_2': ['leaf', None, 'FT', 'FORMATTED_TEXT', None, -1],
'CF_3': ['leaf', None, 'IS', 'NAME_OF_CODING_SYSTEM', None, -1],
'CF_4': ['leaf', None, 'ID', 'ALTERNATE_IDENTIFIER_ID', None, -1],
'CF_5': ['leaf', None, 'FT', 'ALTERNATE_FORMATTED_TEXT', None, -1],
'CF_6': ['leaf', None, 'IS', 'NAME_OF_ALTERNATE_CODING_SYSTEM', None, -1],
'CK_1': ['leaf', None, 'NM', 'ID_NUMBER_NM', None, -1],
'CK_2': ['leaf', None, 'NM', 'CHECK_DIGIT_NM', None, -1],
'CK_3': ['leaf', None, 'ID', 'CODE_IDENTIFYING_THE_CHECK_DIGIT_SCHEME_EMPLOYED', None, -1],
'CK_4': ['sequence', None, 'HD', 'ASSIGNING_AUTHORITY', 'HL70363', -1],
'CN_1': ['leaf', None, 'ST', 'ID_NUMBER_ST', None, -1],
'CN_2': ['sequence', None, 'FN', 'FAMILY_NAME', None, -1],
'CN_3': ['leaf', None, 'ST', 'GIVEN_NAME', None, -1],
'CN_4': ['leaf', None, 'ST', 'SECOND_AND_FURTHER_GIVEN_NAMES_OR_INITIALS_THEREOF', None, -1],
'CN_5': ['leaf', None, 'ST', 'SUFFIX_E_G_JR_OR_III', None, -1],
'CN_6': ['leaf', None, 'ST', 'PREFIX_E_G_DR', None, -1],
'CN_7': ['leaf', None, 'IS', 'DEGREE_E_G_MD', None, -1],
'CN_8': ['leaf', None, 'IS', 'SOURCE_TABLE', None, -1],
'CN_9': ['sequence', None, 'HD', 'ASSIGNING_AUTHORITY', None, -1],
'CNE_1': ['leaf', None, 'ST', 'IDENTIFIER_ST', None, -1],
'CNE_2': ['leaf', None, 'ST', 'TEXT', None, -1],
'CNE_3': ['leaf', None, 'IS', 'NAME_OF_CODING_SYSTEM', None, -1],
'CNE_4': ['leaf', None, 'ST', 'ALTERNATE_IDENTIFIER_ST', None, -1],
'CNE_5': ['leaf', None, 'ST', 'ALTERNATE_TEXT', None, -1],
'CNE_6': ['leaf', None, 'IS', 'NAME_OF_ALTERNATE_CODING_SYSTEM', None, -1],
'CNE_7': ['leaf', None, 'ST', 'CODING_SYSTEM_VERSION_ID', None, -1],
'CNE_8': ['leaf', None, 'ST', 'ALTERNATE_CODING_SYSTEM_VERSION_ID', None, -1],
'CNE_9': ['leaf', None, 'ST', 'ORIGINAL_TEXT', None, -1],
'CNN_1': ['leaf', None, 'ST', 'ID_NUMBER_ST', None, -1],
'CNN_2': ['leaf', None, 'ST', 'FAMILY_NAME', None, -1],
'CNN_3': ['leaf', None, 'ST', 'GIVEN_NAME', None, -1],
'CNN_4': ['leaf', None, 'ST', 'SECOND_AND_FURTHER_GIVEN_NAMES_OR_INITIALS_THEREOF', None, -1],
'CNN_5': ['leaf', None, 'ST', 'SUFFIX_E_G_JR_OR_III', None, -1],
'CNN_6': ['leaf', None, 'ST', 'PREFIX_E_G_DR', None, -1],
'CNN_7': ['leaf', None, 'IS', 'DEGREE_E_G_MD', None, -1],
'CNN_8': ['leaf', None, 'IS', 'SOURCE_TABLE', None, -1],
'CNN_9': ['leaf', None, 'IS', 'ASSIGNING_AUTHORITY_NAMESPACE_ID', None, -1],
'CNN_10': ['leaf', None, 'ST', 'ASSIGNING_AUTHORITY_UNIVERSAL_ID', None, -1],
'CNN_11': ['leaf', None, 'ID', 'ASSIGNING_AUTHORITY_UNIVERSAL_ID_TYPE', None, -1],
'CP_1': ['sequence', None, 'MO', 'PRICE', None, -1],
'CP_2': ['leaf', None, 'ID', 'PRICE_TYPE', 'HL70205', -1],
'CP_3': ['leaf', None, 'NM', 'FROM_VALUE', None, -1],
'CP_4': ['leaf', None, 'NM', 'TO_VALUE', None, -1],
'CP_5': ['sequence', None, 'CE', 'RANGE_UNITS', None, -1],
'CP_6': ['leaf', None, 'ID', 'RANGE_TYPE', 'HL70298', -1],
'CQ_1': ['leaf', None, 'NM', 'QUANTITY', None, -1],
'CQ_2': ['sequence', None, 'CE', 'UNITS', None, -1],
'CQ_SIMPLE_1': ['leaf', None, 'NM', 'QUANTITY', None, -1],
'CQ_SIMPLE_2': ['leaf', None, 'ST', 'UNITS', None, -1],
'CSU_1': ['leaf', None, 'NM', 'CHANNEL_SENSITIVITY', None, -1],
'CSU_2': ['leaf', None, 'ST', 'UNIT_OF_MEASURE_IDENTIFIER', None, -1],
'CSU_3': ['leaf', None, 'ST', 'UNIT_OF_MEASURE_DESCRIPTION', None, -1],
'CSU_4': ['leaf', None, 'IS', 'UNIT_OF_MEASURE_CODING_SYSTEM', None, -1],
'CSU_5': ['leaf', None, 'ST', 'ALTERNATE_UNIT_OF_MEASURE_IDENTIFIER', None, -1],
'CSU_6': ['leaf', None, 'ST', 'ALTERNATE_UNIT_OF_MEASURE_DESCRIPTION', None, -1],
'CSU_7': ['leaf', None, 'IS', 'ALTERNATE_UNIT_OF_MEASURE_CODING_SYSTEM', None, -1],
'CWE_1': ['leaf', None, 'ST', 'IDENTIFIER_ST', None, -1],
'CWE_2': ['leaf', None, 'ST', 'TEXT', None, -1],
'CWE_3': ['leaf', None, 'IS', 'NAME_OF_CODING_SYSTEM', None, -1],
'CWE_4': ['leaf', None, 'ST', 'ALTERNATE_IDENTIFIER_ST', None, -1],
'CWE_5': ['leaf', None, 'ST', 'ALTERNATE_TEXT', None, -1],
'CWE_6': ['leaf', None, 'IS', 'NAME_OF_ALTERNATE_CODING_SYSTEM', None, -1],
'CWE_7': ['leaf', None, 'ST', 'CODING_SYSTEM_VERSION_ID', None, -1],
'CWE_8': ['leaf', None, 'ST', 'ALTERNATE_CODING_SYSTEM_VERSION_ID', None, -1],
'CWE_9': ['leaf', None, 'ST', 'ORIGINAL_TEXT', None, -1],
'CX_1': ['leaf', None, 'ST', 'ID', None, -1],
'CX_2': ['leaf', None, 'ST', 'CHECK_DIGIT_ST', None, -1],
'CX_3': ['leaf', None, 'ID', 'CODE_IDENTIFYING_THE_CHECK_DIGIT_SCHEME_EMPLOYED', None, -1],
'CX_4': ['sequence', None, 'HD', 'ASSIGNING_AUTHORITY', None, -1],
'CX_5': ['leaf', None, 'ID', 'IDENTIFIER_TYPE_CODE_ID', 'HL70203', -1],
'CX_6': ['sequence', None, 'HD', 'ASSIGNING_FACILITY', None, -1],
'CX_7': ['leaf', None, 'DT', 'EFFECTIVE_DATE_DT', None, -1],
'CX_8': ['leaf', None, 'DT', 'EXPIRATION_DATE', None, -1],
'DDI_1': ['leaf', None, 'NM', 'DELAY_DAYS', None, -1],
'DDI_2': ['leaf', None, 'NM', 'AMOUNT', None, -1],
'DDI_3': ['leaf', None, 'NM', 'NUMBER_OF_DAYS', None, -1],
'DIN_1': ['sequence', None, 'TS', 'DATE', None, -1],
'DIN_2': ['sequence', None, 'CE', 'INSTITUTION_NAME', None, -1],
'DLD_1': ['leaf', None, 'ID', 'DISCHARGE_LOCATION', None, -1],
'DLD_2': ['sequence', None, 'TS', 'EFFECTIVE_DATE', None, -1],
'DLN_1': ['leaf', None, 'ST', 'DRIVER_S_LICENSE_NUMBER', None, -1],
'DLN_2': ['leaf', None, 'IS', 'ISSUING_STATE_PROVINCE_COUNTRY', None, -1],
'DLN_3': ['leaf', None, 'DT', 'EXPIRATION_DATE', None, -1],
'DLT_1': ['sequence', None, 'NR', 'RANGE', None, -1],
'DLT_2': ['leaf', None, 'NM', 'NUMERIC_THRESHOLD', None, -1],
'DLT_3': ['leaf', None, 'ST', 'CHANGE_COMPUTATION', None, -1],
'DLT_4': ['leaf', None, 'NM', 'LENGTH_OF_TIME_DAYS', None, -1],
'DR_1': ['sequence', None, 'TS', 'RANGE_START_DATE_TIME', None, -1],
'DR_2': ['sequence', None, 'TS', 'RANGE_END_DATE_TIME', None, -1],
'DR_SIMPLE_1': ['leaf', None, 'ST', 'RANGE_START_DATE_TIME', None, -1],
'DR_SIMPLE_2': ['leaf', None, 'ST', 'RANGE_END_DATE_TIME', None, -1],
'DTN_1': ['leaf', None, 'IS', 'DAY_TYPE', None, -1],
'DTN_2': ['leaf', None, 'NM', 'NUMBER_OF_DAYS', None, -1],
'ED_1': ['sequence', None, 'HD', 'SOURCE_APPLICATION', None, -1],
'ED_2': ['leaf', None, 'ID', 'TYPE_OF_DATA', 'HL70191', -1],
'ED_3': ['leaf', None, 'ID', 'DATA', 'HL70291', -1],
'ED_4': ['leaf', None, 'ID', 'ENCODING', 'HL70299', -1],
'ED_5': ['leaf', None, 'ST', 'DATA', None, -1],
'EI_1': ['leaf', None, 'ST', 'ENTITY_IDENTIFIER', None, -1],
'EI_2': ['leaf', None, 'IS', 'NAMESPACE_ID', 'HL70300', -1],
'EI_3': ['leaf', None, 'ST', 'UNIVERSAL_ID', None, -1],
'EI_4': ['leaf', None, 'ID', 'UNIVERSAL_ID_TYPE', 'HL70301', -1],
'EIP_1': ['sequence', None, 'EI', 'PARENT_S_PLACER_ORDER_NUMBER', None, -1],
'EIP_2': ['sequence', None, 'EI', 'PARENT_S_FILLER_ORDER_NUMBER', None, -1],
'ELD_1': ['leaf', None, 'ST', 'SEGMENT_ID', None, -1],
'ELD_2': ['leaf', None, 'NM', 'SEQUENCE', None, -1],
'ELD_3': ['leaf', None, 'NM', 'FIELD_POSITION', None, -1],
'ELD_4': ['sequence', None, 'CE', 'CODE_IDENTIFYING_ERROR', None, -1],
'FC_1': ['leaf', None, 'IS', 'FINANCIAL_CLASS', 'HL70064', -1],
'FC_2': ['sequence', None, 'TS', 'EFFECTIVE_DATE_TS', None, -1],
'FN_1': ['leaf', None, 'ST', 'SURNAME', None, -1],
'FN_2': ['leaf', None, 'ST', 'OWN_SURNAME_PREFIX', None, -1],
'FN_3': ['leaf', None, 'ST', 'OWN_SURNAME', None, -1],
'FN_4': ['leaf', None, 'ST', 'SURNAME_PREFIX_FROM_PARTNER_SPOUSE', None, -1],
'FN_5': ['leaf', None, 'ST', 'SURNAME_FROM_PARTNER_SPOUSE', None, -1],
'HD_1': ['leaf', None, 'IS', 'NAMESPACE_ID', 'HL70300', -1],
'HD_2': ['leaf', None, 'ST', 'UNIVERSAL_ID', None, -1],
'HD_3': ['leaf', None, 'ID', 'UNIVERSAL_ID_TYPE', 'HL70301', -1],
'JCC_1': ['leaf', None, 'IS', 'JOB_CODE', 'HL70327', -1],
'JCC_2': ['leaf', None, 'IS', 'JOB_CLASS', 'HL70328', -1],
'LA1_1': ['leaf', None, 'IS', 'POINT_OF_CARE_IS', None, -1],
'LA1_2': ['leaf', None, 'IS', 'ROOM', None, -1],
'LA1_3': ['leaf', None, 'IS', 'BED', None, -1],
'LA1_4': ['sequence', None, 'HD', 'FACILITY_HD', None, -1],
'LA1_5': ['leaf', None, 'IS', 'LOCATION_STATUS', None, -1],
'LA1_6': ['leaf', None, 'IS', 'PERSON_LOCATION_TYPE', None, -1],
'LA1_7': ['leaf', None, 'IS', 'BUILDING', None, -1],
'LA1_8': ['leaf', None, 'IS', 'FLOOR', None, -1],
'LA1_9': ['sequence', None, 'AD', 'ADDRESS', None, -1],
'LA2_1': ['leaf', None, 'IS', 'POINT_OF_CARE_IS', None, -1],
'LA2_2': ['leaf', None, 'IS', 'ROOM', None, -1],
'LA2_3': ['leaf', None, 'IS', 'BED', None, -1],
'LA2_4': ['sequence', None, 'HD', 'FACILITY_HD', None, -1],
'LA2_5': ['leaf', None, 'IS', 'LOCATION_STATUS', None, -1],
'LA2_6': ['leaf', None, 'IS', 'PERSON_LOCATION_TYPE', None, -1],
'LA2_7': ['leaf', None, 'IS', 'BUILDING', None, -1],
'LA2_8': ['leaf', None, 'IS', 'FLOOR', None, -1],
'LA2_9': ['leaf', None, 'ST', 'STREET_ADDRESS_ST', None, -1],
'LA2_10': ['leaf', None, 'ST', 'OTHER_DESIGNATION', None, -1],
'LA2_11': ['leaf', None, 'ST', 'CITY', None, -1],
'LA2_12': ['leaf', None, 'ST', 'STATE_OR_PROVINCE', None, -1],
'LA2_13': ['leaf', None, 'ST', 'ZIP_OR_POSTAL_CODE', None, -1],
'LA2_14': ['leaf', None, 'ID', 'COUNTRY', None, -1],
'LA2_15': ['leaf', None, 'ID', 'ADDRESS_TYPE', None, -1],
'LA2_16': ['leaf', None, 'ST', 'OTHER_GEOGRAPHIC_DESIGNATION', None, -1],
'MA_1': ['leaf', None, 'NM', 'SAMPLE_1_FROM_CHANNEL_1', None, -1],
'MA_2': ['leaf', None, 'NM', 'SAMPLE_1_FROM_CHANNEL_2', None, -1],
'MA_3': ['leaf', None, 'NM', 'SAMPLE_1_FROM_CHANNEL_3', None, -1],
'MA_4': ['leaf', None, 'NM', 'SAMPLE_1_FROM_CHANNEL_4', None, -1],
'MA_5': ['leaf', None, 'NM', 'SAMPLE_1_FROM_CHANNEL_5', None, -1],
'MA_6': ['leaf', None, 'NM', 'SAMPLE_1_FROM_CHANNEL_6', None, -1],
'MO_1': ['leaf', None, 'NM', 'QUANTITY', None, -1],
'MO_2': ['leaf', None, 'ID', 'DENOMINATION', None, -1],
'MOC_1': ['sequence', None, 'MO', 'DOLLAR_AMOUNT', None, -1],
'MOC_2': ['sequence', None, 'CE', 'CHARGE_CODE', None, -1],
'MOP_1': ['leaf', None, 'IS', 'MONEY_OR_PERCENTAGE_INDICATOR', None, -1],
'MOP_2': ['leaf', None, 'NM', 'MONEY_OR_PERCENTAGE_QUANTITY', None, -1],
'MSG_1': ['leaf', None, 'ID', 'MESSAGE_TYPE', None, -1],
'MSG_2': ['leaf', None, 'ID', 'TRIGGER_EVENT', None, -1],
'MSG_3': ['leaf', None, 'ID', 'MESSAGE_STRUCTURE', None, -1],
'NA_1': ['leaf', None, 'NM', 'VALUE1', None, -1],
'NA_2': ['leaf', None, 'NM', 'VALUE2', None, -1],
'NA_3': ['leaf', None, 'NM', 'VALUE3', None, -1],
'NA_4': ['leaf', None, 'NM', 'VALUE4', None, -1],
'NDL_1': ['sequence', None, 'CNN', 'NAME', None, -1],
'NDL_2': ['sequence', None, 'TS', 'START_DATE_TIME', None, -1],
'NDL_3': ['sequence', None, 'TS', 'END_DATE_TIME', None, -1],
'NDL_4': ['leaf', None, 'IS', 'POINT_OF_CARE_IS', None, -1],
'NDL_5': ['leaf', None, 'IS', 'ROOM', None, -1],
'NDL_6': ['leaf', None, 'IS', 'BED', None, -1],
'NDL_7': ['sequence', None, 'HD', 'FACILITY_HD', None, -1],
'NDL_8': ['leaf', None, 'IS', 'LOCATION_STATUS', None, -1],
'NDL_9': ['leaf', None, 'IS', 'PERSON_LOCATION_TYPE', None, -1],
'NDL_10': ['leaf', None, 'IS', 'BUILDING', None, -1],
'NDL_11': ['leaf', None, 'IS', 'FLOOR', None, -1],
'NR_1': ['leaf', None, 'NM', 'LOW_VALUE', None, -1],
'NR_2': ['leaf', None, 'NM', 'HIGH_VALUE', None, -1],
'OCD_1': ['leaf', None, 'IS', 'OCCURRENCE_CODE', None, -1],
'OCD_2': ['leaf', None, 'DT', 'OCCURRENCE_DATE', None, -1],
'OSD_1': ['leaf', None, 'ID', 'SEQUENCE_RESULTS_FLAG', None, -1],
'OSD_2': ['leaf', None, 'ST', 'PLACER_ORDER_NUMBER_ENTITY_IDENTIFIER', None, -1],
'OSD_3': ['leaf', None, 'IS', 'PLACER_ORDER_NUMBER_NAMESPACE_ID', None, -1],
'OSD_4': ['leaf', None, 'ST', 'FILLER_ORDER_NUMBER_ENTITY_IDENTIFIER', None, -1],
'OSD_5': ['leaf', None, 'IS', 'FILLER_ORDER_NUMBER_NAMESPACE_ID', None, -1],
'OSD_6': ['leaf', None, 'ST', 'SEQUENCE_CONDITION_VALUE', None, -1],
'OSD_7': ['leaf', None, 'NM', 'MAXIMUM_NUMBER_OF_REPEATS', None, -1],
'OSD_8': ['leaf', None, 'ST', 'PLACER_ORDER_NUMBER_UNIVERSAL_ID', None, -1],
'OSD_9': ['leaf', None, 'ID', 'PLACER_ORDER_NUMBER_UNIVERSAL_ID_TYPE', None, -1],
'OSD_10': ['leaf', None, 'ST', 'FILLER_ORDER_NUMBER_UNIVERSAL_ID', None, -1],
'OSD_11': ['leaf', None, 'ID', 'FILLER_ORDER_NUMBER_UNIVERSAL_ID_TYPE', None, -1],
'OSP_1': ['sequence', None, 'CE', 'OCCURRENCE_SPAN_CODE', None, -1],
'OSP_2': ['leaf', None, 'DT', 'OCCURRENCE_SPAN_START_DATE', None, -1],
'OSP_3': ['leaf', None, 'DT', 'OCCURRENCE_SPAN_STOP_DATE', None, -1],
'PCF_1': ['leaf', None, 'IS', 'PRE_CERTIFICATION_PATIENT_TYPE', None, -1],
'PCF_2': ['leaf', None, 'ID', 'PRE_CERTIFICATION_REQUIRED', None, -1],
'PCF_3': ['sequence', None, 'TS', 'PRE_CERTIFICATION_WINDOW', None, -1],
'PI_1': ['leaf', None, 'ST', 'ID_NUMBER_ST', None, -1],
'PI_2': ['leaf', None, 'IS', 'TYPE_OF_ID_NUMBER_IS', None, -1],
'PI_3': ['leaf', None, 'ST', 'OTHER_QUALIFYING_INFO', None, -1],
'PIP_1': ['sequence', None, 'CE', 'PRIVILEGE', None, -1],
'PIP_2': ['sequence', None, 'CE', 'PRIVILEGE_CLASS', None, -1],
'PIP_3': ['leaf', None, 'DT', 'EXPIRATION_DATE', None, -1],
'PIP_4': ['leaf', None, 'DT', 'ACTIVATION_DATE', None, -1],
'PIP_5': ['sequence', None, 'EI', 'FACILITY_EI', None, -1],
'PL_1': ['leaf', None, 'IS', 'POINT_OF_CARE', None, -1],
'PL_2': ['leaf', None, 'IS', 'ROOM', None, -1],
'PL_3': ['leaf', None, 'IS', 'BED', None, -1],
'PL_4': ['sequence', None, 'HD', 'FACILITY_HD', 'HL70300', -1],
'PL_5': ['leaf', None, 'IS', 'LOCATION_STATUS', None, -1],
'PL_6': ['leaf', None, 'IS', 'PERSON_LOCATION_TYPE', None, -1],
'PL_7': ['leaf', None, 'IS', 'BUILDING', None, -1],
'PL_8': ['leaf', None, 'IS', 'FLOOR', None, -1],
'PL_9': ['leaf', None, 'ST', 'LOCATION_DESCRIPTION', None, -1],
'PLN_1': ['leaf', None, 'ST', 'ID_NUMBER_ST', None, -1],
'PLN_2': ['leaf', None, 'IS', 'TYPE_OF_ID_NUMBER_IS', None, -1],
'PLN_3': ['leaf', None, 'ST', 'STATE_OTHER_QUALIFYING_INFO', None, -1],
'PLN_4': ['leaf', None, 'DT', 'EXPIRATION_DATE', None, -1],
'PN_1': ['sequence', None, 'FN', 'FAMILY_NAME', None, -1],
'PN_2': ['leaf', None, 'ST', 'GIVEN_NAME', None, -1],
'PN_3': ['leaf', None, 'ST', 'SECOND_AND_FURTHER_GIVEN_NAMES_OR_INITIALS_THEREOF', None, -1],
'PN_4': ['leaf', None, 'ST', 'SUFFIX_E_G_JR_OR_III', None, -1],
'PN_5': ['leaf', None, 'ST', 'PREFIX_E_G_DR', None, -1],
'PN_6': ['leaf', None, 'IS', 'DEGREE_E_G_MD', None, -1],
'PPN_1': ['leaf', None, 'ST', 'ID_NUMBER_ST', None, -1],
'PPN_2': ['sequence', None, 'FN', 'FAMILY_NAME', None, -1],
'PPN_3': ['leaf', None, 'ST', 'GIVEN_NAME', None, -1],
'PPN_4': ['leaf', None, 'ST', 'SECOND_AND_FURTHER_GIVEN_NAMES_OR_INITIALS_THEREOF', None, -1],
'PPN_5': ['leaf', None, 'ST', 'SUFFIX_E_G_JR_OR_III', None, -1],
'PPN_6': ['leaf', None, 'ST', 'PREFIX_E_G_DR', None, -1],
'PPN_7': ['leaf', None, 'IS', 'DEGREE_E_G_MD', None, -1],
'PPN_8': ['leaf', None, 'IS', 'SOURCE_TABLE', None, -1],
'PPN_9': ['sequence', None, 'HD', 'ASSIGNING_AUTHORITY', 'HL70363', -1],
'PPN_10': ['leaf', None, 'ID', 'NAME_TYPE_CODE', None, -1],
'PPN_11': ['leaf', None, 'ST', 'IDENTIFIER_CHECK_DIGIT', None, -1],
'PPN_12': ['leaf', None, 'ID', 'CODE_IDENTIFYING_THE_CHECK_DIGIT_SCHEME_EMPLOYED', None, -1],
'PPN_13': ['leaf', None, 'IS', 'IDENTIFIER_TYPE_CODE_IS', None, -1],
'PPN_14': ['sequence', None, 'HD', 'ASSIGNING_FACILITY', None, -1],
'PPN_15': ['sequence', None, 'TS', 'DATE_TIME_ACTION_PERFORMED', None, -1],
'PPN_16': ['leaf', None, 'ID', 'NAME_REPRESENTATION_CODE', None, -1],
'PPN_17': ['sequence', None, 'CE', 'NAME_CONTEXT', None, -1],
'PPN_18': ['sequence', None, 'DR_SIMPLE', 'NAME_VALIDITY_RANGE', None, -1],
'PPN_19': ['leaf', None, 'ID', 'NAME_ASSEMBLY_ORDER', None, -1],
'PRL_1': ['sequence', None, 'CE', 'OBX_3_OBSERVATION_IDENTIFIER_OF_PARENT_RESULT', None, -1],
'PRL_2': ['leaf', None, 'ST', 'OBX_4_SUB_ID_OF_PARENT_RESULT', None, -1],
'PRL_3': ['leaf', None, 'TX', 'PART_OF_OBX_5_OBSERVATION_RESULT_FROM_PARENT', None, -1],
'PT_1': ['leaf', None, 'ID', 'PROCESSING_ID', None, -1],
'PT_2': ['leaf', None, 'ID', 'PROCESSING_MODE', None, -1],
'PTA_1': ['leaf', None, 'IS', 'POLICY_TYPE', None, -1],
'PTA_2': ['leaf', None, 'IS', 'AMOUNT_CLASS', None, -1],
'PTA_3': ['leaf', None, 'NM', 'AMOUNT', None, -1],
'QIP_1': ['leaf', None, 'ST', 'SEGMENT_FIELD_NAME', None, -1],
'QIP_2': ['leaf', None, 'ST', 'VALUE1_VALUE2_VALUE3', None, -1],
'QSC_1': ['leaf', None, 'ST', 'SEGMENT_FIELD_NAME', None, -1],
'QSC_2': ['leaf', None, 'ID', 'RELATIONAL_OPERATOR', None, -1],
'QSC_3': ['leaf', None, 'ST', 'VALUE', None, -1],
'QSC_4': ['leaf', None, 'ID', 'RELATIONAL_CONJUNCTION', None, -1],
'RCD_1': ['leaf', None, 'ST', 'SEGMENT_FIELD_NAME', None, -1],
'RCD_2': ['leaf', None, 'ST', 'HL7_DATE_TYPE', None, -1],
'RCD_3': ['leaf', None, 'NM', 'MAXIMUM_COLUMN_WIDTH', None, -1],
'RFR_1': ['sequence', None, 'NR', 'NUMERIC_RANGE', None, -1],
'RFR_2': ['leaf', None, 'IS', 'ADMINISTRATIVE_SEX', None, -1],
'RFR_3': ['sequence', None, 'NR', 'AGE_RANGE', None, -1],
'RFR_4': ['sequence', None, 'NR', 'GESTATIONAL_RANGE', None, -1],
'RFR_5': ['leaf', None, 'TX', 'SPECIES', None, -1],
'RFR_6': ['leaf', None, 'ST', 'RACE_SUBSPECIES', None, -1],
'RFR_7': ['leaf', None, 'TX', 'CONDITIONS', None, -1],
'RI_1': ['leaf', None, 'IS', 'REPEAT_PATTERN', None, -1],
'RI_2': ['leaf', None, 'ST', 'EXPLICIT_TIME_INTERVAL', None, -1],
'RMC_1': ['leaf', None, 'IS', 'ROOM_TYPE', None, -1],
'RMC_2': ['leaf', None, 'IS', 'AMOUNT_TYPE', None, -1],
'RMC_3': ['leaf', None, 'NM', 'COVERAGE_AMOUNT', None, -1],
'RP_1': ['leaf', None, 'ST', 'POINTER', None, -1],
'RP_2': ['sequence', None, 'HD', 'APPLICATION_ID', None, -1],
'RP_3': ['leaf', None, 'ID', 'TYPE_OF_DATA', None, -1],
'RP_4': ['leaf', None, 'ID', 'SUBTYPE', None, -1],
'SAD_1': ['leaf', None, 'ST', 'STREET_OR_MAILING_ADDRESS', None, -1],
'SAD_2': ['leaf', None, 'ST', 'STREET_NAME', None, -1],
'SAD_3': ['leaf', None, 'ST', 'DWELLING_NUMBER', None, -1],
'SCV_1': ['leaf', None, 'IS', 'PARAMETER_CLASS', None, -1],
'SCV_2': ['leaf', None, 'ST', 'PARAMETER_VALUE', None, -1],
'SN_1': ['leaf', None, 'ST', 'COMPARATOR', None, -1],
'SN_2': ['leaf', None, 'NM', 'NUM1', None, -1],
'SN_3': ['leaf', None, 'ST', 'SEPARATOR_SUFFIX', None, -1],
'SN_4': ['leaf', None, 'NM', 'NUM2', None, -1],
'SPD_1': ['leaf', None, 'ST', 'SPECIALTY_NAME', None, -1],
'SPD_2': ['leaf', None, 'ST', 'GOVERNING_BOARD', None, -1],
'SPD_3': ['leaf', None, 'ID', 'ELIGIBLE_OR_CERTIFIED', None, -1],
'SPD_4': ['leaf', None, 'DT', 'DATE_OF_CERTIFICATION', None, -1],
'SPS_1': ['sequence', None, 'CE', 'SPECIMEN_SOURCE_NAME_OR_CODE', None, -1],
'SPS_2': ['leaf', None, 'TX', 'ADDITIVES', None, -1],
'SPS_3': ['leaf', None, 'TX', 'FREETEXT', None, -1],
'SPS_4': ['sequence', None, 'CE', 'BODY_SITE', None, -1],
'SPS_5': ['sequence', None, 'CE', 'SITE_MODIFIER', None, -1],
'SPS_6': ['sequence', None, 'CE', 'COLLECTION_MODIFIER_METHOD_CODE', None, -1],
'SPS_7': ['sequence', None, 'CE', 'SPECIMEN_ROLE', None, -1],
'SRT_1': ['leaf', None, 'ST', 'SORT_BY_FIELD', None, -1],
'SRT_2': ['leaf', None, 'ID', 'SEQUENCING', None, -1],
'TQ_1': ['sequence', None, 'CQ_SIMPLE', 'QUANTITY', None, -1],
'TQ_2': ['sequence', None, 'RI', 'INTERVAL', None, -1],
'TQ_3': ['leaf', None, 'ST', 'DURATION', None, -1],
'TQ_4': ['sequence', None, 'TS', 'START_DATE_TIME', None, -1],
'TQ_5': ['sequence', None, 'TS', 'END_DATE_TIME', None, -1],
'TQ_6': ['leaf', None, 'ST', 'PRIORITY', None, -1],
'TQ_7': ['leaf', None, 'ST', 'CONDITION', None, -1],
'TQ_8': ['leaf', None, 'TX', 'TEXT_TX', None, -1],
'TQ_9': ['leaf', None, 'ID', 'CONJUNCTION_COMPONENT', None, -1],
'TQ_10': ['sequence', None, 'OSD', 'ORDER_SEQUENCING', None, -1],
'TQ_11': ['sequence', None, 'CE', 'OCCURRENCE_DURATION', None, -1],
'TQ_12': ['leaf', None, 'NM', 'TOTAL_OCCURENCES', None, -1],
'TS_1': ['leaf', None, 'ST', 'TIME_OF_AN_EVENT', None, -1],
'TS_2': ['leaf', None, 'ST', 'DEGREE_OF_PRECISION', None, -1],
'TX_CHALLENGE_1': ['leaf', None, 'TX', '', 'HL70256', -1],
'TX_CHALLENGE_2': ['leaf', None, 'TX', '', 'HL70257', -1],
'UVC_1': ['leaf', None, 'IS', 'VALUE_CODE', None, -1],
'UVC_2': ['leaf', None, 'NM', 'VALUE_AMOUNT', None, -1],
'VH_1': ['leaf', None, 'ID', 'START_DAY_RANGE', None, -1],
'VH_2': ['leaf', None, 'ID', 'END_DAY_RANGE', None, -1],
'VH_3': ['leaf', None, 'TM', 'START_HOUR_RANGE', None, -1],
'VH_4': ['leaf', None, 'TM', 'END_HOUR_RANGE', None, -1],
'VID_1': ['leaf', None, 'ID', 'VERSION_ID', None, -1],
'VID_2': ['sequence', None, 'CE', 'INTERNATIONALIZATION_CODE', None, -1],
'VID_3': ['sequence', None, 'CE', 'INTERNATIONAL_VERSION_ID', None, -1],
'VR_1': ['leaf', None, 'ST', 'FIRST_DATA_CODE_VALUE', None, -1],
'VR_2': ['leaf', None, 'ST', 'LAST_DATA_CODE_CALUE', None, -1],
'WVI_1': ['leaf', None, 'NM', 'CHANNEL_NUMBER', None, -1],
'WVI_2': ['leaf', None, 'ST', 'CHANNEL_NAME', None, -1],
'WVS_1': ['leaf', None, 'ST', 'SOURCE_NAME_1', None, -1],
'WVS_2': ['leaf', None, 'ST', 'SOURCE_NAME_2', None, -1],
'XAD_1': ['sequence', None, 'SAD', 'STREET_ADDRESS_SAD', None, -1],
'XAD_2': ['leaf', None, 'ST', 'OTHER_DESIGNATION', None, -1],
'XAD_3': ['leaf', None, 'ST', 'CITY', None, -1],
'XAD_4': ['leaf', None, 'ST', 'STATE_OR_PROVINCE', None, -1],
'XAD_5': ['leaf', None, 'ST', 'ZIP_OR_POSTAL_CODE', None, -1],
'XAD_6': ['leaf', None, 'ID', 'COUNTRY', None, -1],
'XAD_7': ['leaf', None, 'ID', 'ADDRESS_TYPE', None, -1],
'XAD_8': ['leaf', None, 'ST', 'OTHER_GEOGRAPHIC_DESIGNATION', None, -1],
'XAD_9': ['leaf', None, 'IS', 'COUNTY_PARISH_CODE', None, -1],
'XAD_10': ['leaf', None, 'IS', 'CENSUS_TRACT', None, -1],
'XAD_11': ['leaf', None, 'ID', 'ADDRESS_REPRESENTATION_CODE', None, -1],
'XAD_12': ['sequence', None, 'DR_SIMPLE', 'ADDRESS_VALIDITY_RANGE', None, -1],
'XCN_1': ['leaf', None, 'ST', 'ID_NUMBER_ST', None, -1],
'XCN_2': ['sequence', None, 'FN', 'FAMILY_NAME', None, -1],
'XCN_3': ['leaf', None, 'ST', 'GIVEN_NAME', None, -1],
'XCN_4': ['leaf', None, 'ST', 'SECOND_AND_FURTHER_GIVEN_NAMES_OR_INITIALS_THEREOF', None, -1],
'XCN_5': ['leaf', None, 'ST', 'SUFFIX_E_G_JR_OR_III', None, -1],
'XCN_6': ['leaf', None, 'ST', 'PREFIX_E_G_DR', None, -1],
'XCN_7': ['leaf', None, 'IS', 'DEGREE_E_G_MD', None, -1],
'XCN_8': ['leaf', None, 'IS', 'SOURCE_TABLE', None, -1],
'XCN_9': ['sequence', None, 'HD', 'ASSIGNING_AUTHORITY', None, -1],
'XCN_10': ['leaf', None, 'ID', 'NAME_TYPE_CODE', None, -1],
'XCN_11': ['leaf', None, 'ST', 'IDENTIFIER_CHECK_DIGIT', None, -1],
'XCN_12': ['leaf', None, 'ID', 'CODE_IDENTIFYING_THE_CHECK_DIGIT_SCHEME_EMPLOYED', None, -1],
'XCN_13': ['leaf', None, 'IS', 'IDENTIFIER_TYPE_CODE_IS', None, -1],
'XCN_14': ['sequence', None, 'HD', 'ASSIGNING_FACILITY', None, -1],
'XCN_15': ['leaf', None, 'ID', 'NAME_REPRESENTATION_CODE', None, -1],
'XCN_16': ['sequence', None, 'CE', 'NAME_CONTEXT', None, -1],
'XCN_17': ['sequence', None, 'DR_SIMPLE', 'NAME_VALIDITY_RANGE', None, -1],
'XCN_18': ['leaf', None, 'ID', 'NAME_ASSEMBLY_ORDER', None, -1],
'XON_1': ['leaf', None, 'ST', 'ORGANIZATION_NAME', None, -1],
'XON_2': ['leaf', None, 'IS', 'ORGANIZATION_NAME_TYPE_CODE', None, -1],
'XON_3': ['leaf', None, 'NM', 'ID_NUMBER_NM', None, -1],
'XON_4': ['leaf', None, 'NM', 'CHECK_DIGIT_NM', None, -1],
'XON_5': ['leaf', None, 'ID', 'CODE_IDENTIFYING_THE_CHECK_DIGIT_SCHEME_EMPLOYED', None, -1],
'XON_6': ['sequence', None, 'HD', 'ASSIGNING_AUTHORITY', None, -1],
'XON_7': ['leaf', None, 'IS', 'IDENTIFIER_TYPE_CODE_IS', None, -1],
'XON_8': ['sequence', None, 'HD', 'ASSIGNING_FACILITY_ID', None, -1],
'XON_9': ['leaf', None, 'ID', 'NAME_REPRESENTATION_CODE', None, -1],
'XPN_1': ['sequence', None, 'FN', 'FAMILY_NAME', None, -1],
'XPN_2': ['leaf', None, 'ST', 'GIVEN_NAME', None, -1],
'XPN_3': ['leaf', None, 'ST', 'SECOND_AND_FURTHER_GIVEN_NAMES_OR_INITIALS_THEREOF', None, -1],
'XPN_4': ['leaf', None, 'ST', 'SUFFIX_E_G_JR_OR_III', None, -1],
'XPN_5': ['leaf', None, 'ST', 'PREFIX_E_G_DR', None, -1],
'XPN_6': ['leaf', None, 'IS', 'DEGREE_E_G_MD', None, -1],
'XPN_7': ['leaf', None, 'ID', 'NAME_TYPE_CODE', None, -1],
'XPN_8': ['leaf', None, 'ID', 'NAME_REPRESENTATION_CODE', None, -1],
'XPN_9': ['sequence', None, 'CE', 'NAME_CONTEXT', None, -1],
'XPN_10': ['sequence', None, 'DR_SIMPLE', 'NAME_VALIDITY_RANGE', None, -1],
'XPN_11': ['leaf', None, 'ID', 'NAME_ASSEMBLY_ORDER', None, -1],
'XTN_1': ['leaf', None, 'TN', '999_999_9999_X99999_C_ANY_TEXT', None, -1],
'XTN_2': ['leaf', None, 'ID', 'TELECOMMUNICATION_USE_CODE', None, -1],
'XTN_3': ['leaf', None, 'ID', 'TELECOMMUNICATION_EQUIPMENT_TYPE_ID', None, -1],
'XTN_4': ['leaf', None, 'ST', 'EMAIL_ADDRESS', None, -1],
'XTN_5': ['leaf', None, 'NM', 'COUNTRY_CODE', None, -1],
'XTN_6': ['leaf', None, 'NM', 'AREA_CITY_CODE', None, -1],
'XTN_7': ['leaf', None, 'NM', 'PHONE_NUMBER', None, -1],
'XTN_8': ['leaf', None, 'NM', 'EXTENSION', None, -1],
'XTN_9': ['leaf', None, 'ST', 'ANY_TEXT', None, -1],
}
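# Reading sketch for the table above (the field meanings are inferred from the
# entries themselves, so treat this as an assumption): each value looks like
# [node kind ('leaf' or 'sequence'), unused, base datatype, long name,
#  HL7 table id or None, max length (-1 apparently meaning unbounded)].
#
#     kind, _, base_dt, long_name, table, max_len = DATATYPES['CE_1']
#     # -> ('leaf', None, 'ST', 'IDENTIFIER_ST', None, -1)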
DATATYPES_STRUCTS = {
'AD': (
('AD_1', DATATYPES['AD_1'], (0, 1), 'CMP'),
('AD_2', DATATYPES['AD_2'], (0, 1), 'CMP'),
('AD_3', DATATYPES['AD_3'], (0, 1), 'CMP'),
('AD_4', DATATYPES['AD_4'], (0, 1), 'CMP'),
('AD_5', DATATYPES['AD_5'], (0, 1), 'CMP'),
('AD_6', DATATYPES['AD_6'], (0, 1), 'CMP'),
('AD_7', DATATYPES['AD_7'], (0, 1), 'CMP'),
('AD_8', DATATYPES['AD_8'], (0, 1), 'CMP'),),
'AUI': (
('AUI_1', DATATYPES['AUI_1'], (0, 1), 'CMP'),
('AUI_2', DATATYPES['AUI_2'], (0, 1), 'CMP'),
('AUI_3', DATATYPES['AUI_3'], (0, 1), 'CMP'),),
'CCD': (
('CCD_1', DATATYPES['CCD_1'], (0, 1), 'CMP'),
('CCD_2', DATATYPES['CCD_2'], (0, 1), 'CMP'),),
'CCP': (
('CCP_1', DATATYPES['CCP_1'], (0, 1), 'CMP'),
('CCP_2', DATATYPES['CCP_2'], (0, 1), 'CMP'),
('CCP_3', DATATYPES['CCP_3'], (0, 1), 'CMP'),),
'CD': (
('CD_1', DATATYPES['CD_1'], (0, 1), 'CMP'),
('CD_2', DATATYPES['CD_2'], (0, 1), 'CMP'),
('CD_3', DATATYPES['CD_3'], (0, 1), 'CMP'),
('CD_4', DATATYPES['CD_4'], (0, 1), 'CMP'),
('CD_5', DATATYPES['CD_5'], (0, 1), 'CMP'),
('CD_6', DATATYPES['CD_6'], (0, 1), 'CMP'),),
'CE': (
('CE_1', DATATYPES['CE_1'], (0, 1), 'CMP'),
('CE_2', DATATYPES['CE_2'], (0, 1), 'CMP'),
('CE_3', DATATYPES['CE_3'], (0, 1), 'CMP'),
('CE_4', DATATYPES['CE_4'], (0, 1), 'CMP'),
('CE_5', DATATYPES['CE_5'], (0, 1), 'CMP'),
('CE_6', DATATYPES['CE_6'], (0, 1), 'CMP'),),
'CF': (
('CF_1', DATATYPES['CF_1'], (0, 1), 'CMP'),
('CF_2', DATATYPES['CF_2'], (0, 1), 'CMP'),
('CF_3', DATATYPES['CF_3'], (0, 1), 'CMP'),
('CF_4', DATATYPES['CF_4'], (0, 1), 'CMP'),
('CF_5', DATATYPES['CF_5'], (0, 1), 'CMP'),
('CF_6', DATATYPES['CF_6'], (0, 1), 'CMP'),),
'CK': (
('CK_1', DATATYPES['CK_1'], (0, 1), 'CMP'),
('CK_2', DATATYPES['CK_2'], (0, 1), 'CMP'),
('CK_3', DATATYPES['CK_3'], (0, 1), 'CMP'),
('CK_4', DATATYPES['CK_4'], (0, 1), 'CMP'),),
'CN': (
('CN_1', DATATYPES['CN_1'], (0, 1), 'CMP'),
('CN_2', DATATYPES['CN_2'], (0, 1), 'CMP'),
('CN_3', DATATYPES['CN_3'], (0, 1), 'CMP'),
('CN_4', DATATYPES['CN_4'], (0, 1), 'CMP'),
('CN_5', DATATYPES['CN_5'], (0, 1), 'CMP'),
('CN_6', DATATYPES['CN_6'], (0, 1), 'CMP'),
('CN_7', DATATYPES['CN_7'], (0, 1), 'CMP'),
('CN_8', DATATYPES['CN_8'], (0, 1), 'CMP'),
('CN_9', DATATYPES['CN_9'], (0, 1), 'CMP'),),
'CNE': (
('CNE_1', DATATYPES['CNE_1'], (0, 1), 'CMP'),
('CNE_2', DATATYPES['CNE_2'], (0, 1), 'CMP'),
('CNE_3', DATATYPES['CNE_3'], (0, 1), 'CMP'),
('CNE_4', DATATYPES['CNE_4'], (0, 1), 'CMP'),
('CNE_5', DATATYPES['CNE_5'], (0, 1), 'CMP'),
('CNE_6', DATATYPES['CNE_6'], (0, 1), 'CMP'),
('CNE_7', DATATYPES['CNE_7'], (0, 1), 'CMP'),
('CNE_8', DATATYPES['CNE_8'], (0, 1), 'CMP'),
('CNE_9', DATATYPES['CNE_9'], (0, 1), 'CMP'),),
'CNN': (
('CNN_1', DATATYPES['CNN_1'], (0, 1), 'CMP'),
('CNN_2', DATATYPES['CNN_2'], (0, 1), 'CMP'),
('CNN_3', DATATYPES['CNN_3'], (0, 1), 'CMP'),
('CNN_4', DATATYPES['CNN_4'], (0, 1), 'CMP'),
('CNN_5', DATATYPES['CNN_5'], (0, 1), 'CMP'),
('CNN_6', DATATYPES['CNN_6'], (0, 1), 'CMP'),
('CNN_7', DATATYPES['CNN_7'], (0, 1), 'CMP'),
('CNN_8', DATATYPES['CNN_8'], (0, 1), 'CMP'),
('CNN_9', DATATYPES['CNN_9'], (0, 1), 'CMP'),
('CNN_10', DATATYPES['CNN_10'], (0, 1), 'CMP'),
('CNN_11', DATATYPES['CNN_11'], (0, 1), 'CMP'),),
'CP': (
('CP_1', DATATYPES['CP_1'], (0, 1), 'CMP'),
('CP_2', DATATYPES['CP_2'], (0, 1), 'CMP'),
('CP_3', DATATYPES['CP_3'], (0, 1), 'CMP'),
('CP_4', DATATYPES['CP_4'], (0, 1), 'CMP'),
('CP_5', DATATYPES['CP_5'], (0, 1), 'CMP'),
('CP_6', DATATYPES['CP_6'], (0, 1), 'CMP'),),
'CQ': (
('CQ_1', DATATYPES['CQ_1'], (0, 1), 'CMP'),
('CQ_2', DATATYPES['CQ_2'], (0, 1), 'CMP'),),
'CQ_SIMPLE': (
('CQ_SIMPLE_1', DATATYPES['CQ_SIMPLE_1'], (0, 1), 'CMP'),
('CQ_SIMPLE_2', DATATYPES['CQ_SIMPLE_2'], (0, 1), 'CMP'),),
'CSU': (
('CSU_1', DATATYPES['CSU_1'], (0, 1), 'CMP'),
('CSU_2', DATATYPES['CSU_2'], (0, 1), 'CMP'),
('CSU_3', DATATYPES['CSU_3'], (0, 1), 'CMP'),
('CSU_4', DATATYPES['CSU_4'], (0, 1), 'CMP'),
('CSU_5', DATATYPES['CSU_5'], (0, 1), 'CMP'),
('CSU_6', DATATYPES['CSU_6'], (0, 1), 'CMP'),
('CSU_7', DATATYPES['CSU_7'], (0, 1), 'CMP'),),
'CWE': (
('CWE_1', DATATYPES['CWE_1'], (0, 1), 'CMP'),
('CWE_2', DATATYPES['CWE_2'], (0, 1), 'CMP'),
('CWE_3', DATATYPES['CWE_3'], (0, 1), 'CMP'),
('CWE_4', DATATYPES['CWE_4'], (0, 1), 'CMP'),
('CWE_5', DATATYPES['CWE_5'], (0, 1), 'CMP'),
('CWE_6', DATATYPES['CWE_6'], (0, 1), 'CMP'),
('CWE_7', DATATYPES['CWE_7'], (0, 1), 'CMP'),
('CWE_8', DATATYPES['CWE_8'], (0, 1), 'CMP'),
('CWE_9', DATATYPES['CWE_9'], (0, 1), 'CMP'),),
'CX': (
('CX_1', DATATYPES['CX_1'], (0, 1), 'CMP'),
('CX_2', DATATYPES['CX_2'], (0, 1), 'CMP'),
('CX_3', DATATYPES['CX_3'], (0, 1), 'CMP'),
('CX_4', DATATYPES['CX_4'], (0, 1), 'CMP'),
('CX_5', DATATYPES['CX_5'], (0, 1), 'CMP'),
('CX_6', DATATYPES['CX_6'], (0, 1), 'CMP'),
('CX_7', DATATYPES['CX_7'], (0, 1), 'CMP'),
('CX_8', DATATYPES['CX_8'], (0, 1), 'CMP'),),
'DDI': (
('DDI_1', DATATYPES['DDI_1'], (0, 1), 'CMP'),
('DDI_2', DATATYPES['DDI_2'], (0, 1), 'CMP'),
('DDI_3', DATATYPES['DDI_3'], (0, 1), 'CMP'),),
'DIN': (
('DIN_1', DATATYPES['DIN_1'], (0, 1), 'CMP'),
('DIN_2', DATATYPES['DIN_2'], (0, 1), 'CMP'),),
'DLD': (
('DLD_1', DATATYPES['DLD_1'], (0, 1), 'CMP'),
('DLD_2', DATATYPES['DLD_2'], (0, 1), 'CMP'),),
'DLN': (
('DLN_1', DATATYPES['DLN_1'], (0, 1), 'CMP'),
('DLN_2', DATATYPES['DLN_2'], (0, 1), 'CMP'),
('DLN_3', DATATYPES['DLN_3'], (0, 1), 'CMP'),),
'DLT': (
('DLT_1', DATATYPES['DLT_1'], (0, 1), 'CMP'),
('DLT_2', DATATYPES['DLT_2'], (0, 1), 'CMP'),
('DLT_3', DATATYPES['DLT_3'], (0, 1), 'CMP'),
('DLT_4', DATATYPES['DLT_4'], (0, 1), 'CMP'),),
'DR': (
('DR_1', DATATYPES['DR_1'], (0, 1), 'CMP'),
('DR_2', DATATYPES['DR_2'], (0, 1), 'CMP'),),
'DR_SIMPLE': (
('DR_SIMPLE_1', DATATYPES['DR_SIMPLE_1'], (0, 1), 'CMP'),
('DR_SIMPLE_2', DATATYPES['DR_SIMPLE_2'], (0, 1), 'CMP'),),
'DTN': (
('DTN_1', DATATYPES['DTN_1'], (0, 1), 'CMP'),
('DTN_2', DATATYPES['DTN_2'], (0, 1), 'CMP'),),
'ED': (
('ED_1', DATATYPES['ED_1'], (0, 1), 'CMP'),
('ED_2', DATATYPES['ED_2'], (0, 1), 'CMP'),
('ED_3', DATATYPES['ED_3'], (0, 1), 'CMP'),
('ED_4', DATATYPES['ED_4'], (0, 1), 'CMP'),
('ED_5', DATATYPES['ED_5'], (0, 1), 'CMP'),),
'EI': (
('EI_1', DATATYPES['EI_1'], (0, 1), 'CMP'),
('EI_2', DATATYPES['EI_2'], (0, 1), 'CMP'),
('EI_3', DATATYPES['EI_3'], (0, 1), 'CMP'),
('EI_4', DATATYPES['EI_4'], (0, 1), 'CMP'),),
'EIP': (
('EIP_1', DATATYPES['EIP_1'], (0, 1), 'CMP'),
('EIP_2', DATATYPES['EIP_2'], (0, 1), 'CMP'),),
'ELD': (
('ELD_1', DATATYPES['ELD_1'], (0, 1), 'CMP'),
('ELD_2', DATATYPES['ELD_2'], (0, 1), 'CMP'),
('ELD_3', DATATYPES['ELD_3'], (0, 1), 'CMP'),
('ELD_4', DATATYPES['ELD_4'], (0, 1), 'CMP'),),
'FC': (
('FC_1', DATATYPES['FC_1'], (0, 1), 'CMP'),
('FC_2', DATATYPES['FC_2'], (0, 1), 'CMP'),),
'FN': (
('FN_1', DATATYPES['FN_1'], (0, 1), 'CMP'),
('FN_2', DATATYPES['FN_2'], (0, 1), 'CMP'),
('FN_3', DATATYPES['FN_3'], (0, 1), 'CMP'),
('FN_4', DATATYPES['FN_4'], (0, 1), 'CMP'),
('FN_5', DATATYPES['FN_5'], (0, 1), 'CMP'),),
'HD': (
('HD_1', DATATYPES['HD_1'], (0, 1), 'CMP'),
('HD_2', DATATYPES['HD_2'], (0, 1), 'CMP'),
('HD_3', DATATYPES['HD_3'], (0, 1), 'CMP'),),
'JCC': (
('JCC_1', DATATYPES['JCC_1'], (0, 1), 'CMP'),
('JCC_2', DATATYPES['JCC_2'], (0, 1), 'CMP'),),
'LA1': (
('LA1_1', DATATYPES['LA1_1'], (0, 1), 'CMP'),
('LA1_2', DATATYPES['LA1_2'], (0, 1), 'CMP'),
('LA1_3', DATATYPES['LA1_3'], (0, 1), 'CMP'),
('LA1_4', DATATYPES['LA1_4'], (0, 1), 'CMP'),
('LA1_5', DATATYPES['LA1_5'], (0, 1), 'CMP'),
('LA1_6', DATATYPES['LA1_6'], (0, 1), 'CMP'),
('LA1_7', DATATYPES['LA1_7'], (0, 1), 'CMP'),
('LA1_8', DATATYPES['LA1_8'], (0, 1), 'CMP'),
('LA1_9', DATATYPES['LA1_9'], (0, 1), 'CMP'),),
'LA2': (
('LA2_1', DATATYPES['LA2_1'], (0, 1), 'CMP'),
('LA2_2', DATATYPES['LA2_2'], (0, 1), 'CMP'),
('LA2_3', DATATYPES['LA2_3'], (0, 1), 'CMP'),
('LA2_4', DATATYPES['LA2_4'], (0, 1), 'CMP'),
('LA2_5', DATATYPES['LA2_5'], (0, 1), 'CMP'),
('LA2_6', DATATYPES['LA2_6'], (0, 1), 'CMP'),
('LA2_7', DATATYPES['LA2_7'], (0, 1), 'CMP'),
('LA2_8', DATATYPES['LA2_8'], (0, 1), 'CMP'),
('LA2_9', DATATYPES['LA2_9'], (0, 1), 'CMP'),
('LA2_10', DATATYPES['LA2_10'], (0, 1), 'CMP'),
('LA2_11', DATATYPES['LA2_11'], (0, 1), 'CMP'),
('LA2_12', DATATYPES['LA2_12'], (0, 1), 'CMP'),
('LA2_13', DATATYPES['LA2_13'], (0, 1), 'CMP'),
('LA2_14', DATATYPES['LA2_14'], (0, 1), 'CMP'),
('LA2_15', DATATYPES['LA2_15'], (0, 1), 'CMP'),
('LA2_16', DATATYPES['LA2_16'], (0, 1), 'CMP'),),
'MA': (
('MA_1', DATATYPES['MA_1'], (0, 1), 'CMP'),
('MA_2', DATATYPES['MA_2'], (0, 1), 'CMP'),
('MA_3', DATATYPES['MA_3'], (0, 1), 'CMP'),
('MA_4', DATATYPES['MA_4'], (0, 1), 'CMP'),
('MA_5', DATATYPES['MA_5'], (0, 1), 'CMP'),
('MA_6', DATATYPES['MA_6'], (0, 1), 'CMP'),),
'MO': (
('MO_1', DATATYPES['MO_1'], (0, 1), 'CMP'),
('MO_2', DATATYPES['MO_2'], (0, 1), 'CMP'),),
'MOC': (
('MOC_1', DATATYPES['MOC_1'], (0, 1), 'CMP'),
('MOC_2', DATATYPES['MOC_2'], (0, 1), 'CMP'),),
'MOP': (
('MOP_1', DATATYPES['MOP_1'], (0, 1), 'CMP'),
('MOP_2', DATATYPES['MOP_2'], (0, 1), 'CMP'),),
'MSG': (
('MSG_1', DATATYPES['MSG_1'], (0, 1), 'CMP'),
('MSG_2', DATATYPES['MSG_2'], (0, 1), 'CMP'),
('MSG_3', DATATYPES['MSG_3'], (0, 1), 'CMP'),),
'NA': (
('NA_1', DATATYPES['NA_1'], (0, 1), 'CMP'),
('NA_2', DATATYPES['NA_2'], (0, 1), 'CMP'),
('NA_3', DATATYPES['NA_3'], (0, 1), 'CMP'),
('NA_4', DATATYPES['NA_4'], (0, 1), 'CMP'),),
'NDL': (
('NDL_1', DATATYPES['NDL_1'], (0, 1), 'CMP'),
('NDL_2', DATATYPES['NDL_2'], (0, 1), 'CMP'),
('NDL_3', DATATYPES['NDL_3'], (0, 1), 'CMP'),
('NDL_4', DATATYPES['NDL_4'], (0, 1), 'CMP'),
('NDL_5', DATATYPES['NDL_5'], (0, 1), 'CMP'),
('NDL_6', DATATYPES['NDL_6'], (0, 1), 'CMP'),
('NDL_7', DATATYPES['NDL_7'], (0, 1), 'CMP'),
('NDL_8', DATATYPES['NDL_8'], (0, 1), 'CMP'),
('NDL_9', DATATYPES['NDL_9'], (0, 1), 'CMP'),
('NDL_10', DATATYPES['NDL_10'], (0, 1), 'CMP'),
('NDL_11', DATATYPES['NDL_11'], (0, 1), 'CMP'),),
'NR': (
('NR_1', DATATYPES['NR_1'], (0, 1), 'CMP'),
('NR_2', DATATYPES['NR_2'], (0, 1), 'CMP'),),
'OCD': (
('OCD_1', DATATYPES['OCD_1'], (0, 1), 'CMP'),
('OCD_2', DATATYPES['OCD_2'], (0, 1), 'CMP'),),
'OSD': (
('OSD_1', DATATYPES['OSD_1'], (0, 1), 'CMP'),
('OSD_2', DATATYPES['OSD_2'], (0, 1), 'CMP'),
('OSD_3', DATATYPES['OSD_3'], (0, 1), 'CMP'),
('OSD_4', DATATYPES['OSD_4'], (0, 1), 'CMP'),
('OSD_5', DATATYPES['OSD_5'], (0, 1), 'CMP'),
('OSD_6', DATATYPES['OSD_6'], (0, 1), 'CMP'),
('OSD_7', DATATYPES['OSD_7'], (0, 1), 'CMP'),
('OSD_8', DATATYPES['OSD_8'], (0, 1), 'CMP'),
('OSD_9', DATATYPES['OSD_9'], (0, 1), 'CMP'),
('OSD_10', DATATYPES['OSD_10'], (0, 1), 'CMP'),
('OSD_11', DATATYPES['OSD_11'], (0, 1), 'CMP'),),
'OSP': (
('OSP_1', DATATYPES['OSP_1'], (0, 1), 'CMP'),
('OSP_2', DATATYPES['OSP_2'], (0, 1), 'CMP'),
('OSP_3', DATATYPES['OSP_3'], (0, 1), 'CMP'),),
'PCF': (
('PCF_1', DATATYPES['PCF_1'], (0, 1), 'CMP'),
('PCF_2', DATATYPES['PCF_2'], (0, 1), 'CMP'),
('PCF_3', DATATYPES['PCF_3'], (0, 1), 'CMP'),),
'PI': (
('PI_1', DATATYPES['PI_1'], (0, 1), 'CMP'),
('PI_2', DATATYPES['PI_2'], (0, 1), 'CMP'),
('PI_3', DATATYPES['PI_3'], (0, 1), 'CMP'),),
'PIP': (
('PIP_1', DATATYPES['PIP_1'], (0, 1), 'CMP'),
('PIP_2', DATATYPES['PIP_2'], (0, 1), 'CMP'),
('PIP_3', DATATYPES['PIP_3'], (0, 1), 'CMP'),
('PIP_4', DATATYPES['PIP_4'], (0, 1), 'CMP'),
('PIP_5', DATATYPES['PIP_5'], (0, 1), 'CMP'),),
'PL': (
('PL_1', DATATYPES['PL_1'], (0, 1), 'CMP'),
('PL_2', DATATYPES['PL_2'], (0, 1), 'CMP'),
('PL_3', DATATYPES['PL_3'], (0, 1), 'CMP'),
('PL_4', DATATYPES['PL_4'], (0, 1), 'CMP'),
('PL_5', DATATYPES['PL_5'], (0, 1), 'CMP'),
('PL_6', DATATYPES['PL_6'], (0, 1), 'CMP'),
('PL_7', DATATYPES['PL_7'], (0, 1), 'CMP'),
('PL_8', DATATYPES['PL_8'], (0, 1), 'CMP'),
('PL_9', DATATYPES['PL_9'], (0, 1), 'CMP'),),
'PLN': (
('PLN_1', DATATYPES['PLN_1'], (0, 1), 'CMP'),
('PLN_2', DATATYPES['PLN_2'], (0, 1), 'CMP'),
('PLN_3', DATATYPES['PLN_3'], (0, 1), 'CMP'),
('PLN_4', DATATYPES['PLN_4'], (0, 1), 'CMP'),),
'PN': (
('PN_1', DATATYPES['PN_1'], (0, 1), 'CMP'),
('PN_2', DATATYPES['PN_2'], (0, 1), 'CMP'),
('PN_3', DATATYPES['PN_3'], (0, 1), 'CMP'),
('PN_4', DATATYPES['PN_4'], (0, 1), 'CMP'),
('PN_5', DATATYPES['PN_5'], (0, 1), 'CMP'),
('PN_6', DATATYPES['PN_6'], (0, 1), 'CMP'),),
'PPN': (
('PPN_1', DATATYPES['PPN_1'], (0, 1), 'CMP'),
('PPN_2', DATATYPES['PPN_2'], (0, 1), 'CMP'),
('PPN_3', DATATYPES['PPN_3'], (0, 1), 'CMP'),
('PPN_4', DATATYPES['PPN_4'], (0, 1), 'CMP'),
('PPN_5', DATATYPES['PPN_5'], (0, 1), 'CMP'),
('PPN_6', DATATYPES['PPN_6'], (0, 1), 'CMP'),
('PPN_7', DATATYPES['PPN_7'], (0, 1), 'CMP'),
('PPN_8', DATATYPES['PPN_8'], (0, 1), 'CMP'),
('PPN_9', DATATYPES['PPN_9'], (0, 1), 'CMP'),
('PPN_10', DATATYPES['PPN_10'], (0, 1), 'CMP'),
('PPN_11', DATATYPES['PPN_11'], (0, 1), 'CMP'),
('PPN_12', DATATYPES['PPN_12'], (0, 1), 'CMP'),
('PPN_13', DATATYPES['PPN_13'], (0, 1), 'CMP'),
('PPN_14', DATATYPES['PPN_14'], (0, 1), 'CMP'),
('PPN_15', DATATYPES['PPN_15'], (0, 1), 'CMP'),
('PPN_16', DATATYPES['PPN_16'], (0, 1), 'CMP'),
('PPN_17', DATATYPES['PPN_17'], (0, 1), 'CMP'),
('PPN_18', DATATYPES['PPN_18'], (0, 1), 'CMP'),
('PPN_19', DATATYPES['PPN_19'], (0, 1), 'CMP'),),
'PRL': (
('PRL_1', DATATYPES['PRL_1'], (0, 1), 'CMP'),
('PRL_2', DATATYPES['PRL_2'], (0, 1), 'CMP'),
('PRL_3', DATATYPES['PRL_3'], (0, 1), 'CMP'),),
'PT': (
('PT_1', DATATYPES['PT_1'], (0, 1), 'CMP'),
('PT_2', DATATYPES['PT_2'], (0, 1), 'CMP'),),
'PTA': (
('PTA_1', DATATYPES['PTA_1'], (0, 1), 'CMP'),
('PTA_2', DATATYPES['PTA_2'], (0, 1), 'CMP'),
('PTA_3', DATATYPES['PTA_3'], (0, 1), 'CMP'),),
'QIP': (
('QIP_1', DATATYPES['QIP_1'], (0, 1), 'CMP'),
('QIP_2', DATATYPES['QIP_2'], (0, 1), 'CMP'),),
'QSC': (
('QSC_1', DATATYPES['QSC_1'], (0, 1), 'CMP'),
('QSC_2', DATATYPES['QSC_2'], (0, 1), 'CMP'),
('QSC_3', DATATYPES['QSC_3'], (0, 1), 'CMP'),
('QSC_4', DATATYPES['QSC_4'], (0, 1), 'CMP'),),
'RCD': (
('RCD_1', DATATYPES['RCD_1'], (0, 1), 'CMP'),
('RCD_2', DATATYPES['RCD_2'], (0, 1), 'CMP'),
('RCD_3', DATATYPES['RCD_3'], (0, 1), 'CMP'),),
'RFR': (
('RFR_1', DATATYPES['RFR_1'], (0, 1), 'CMP'),
('RFR_2', DATATYPES['RFR_2'], (0, 1), 'CMP'),
('RFR_3', DATATYPES['RFR_3'], (0, 1), 'CMP'),
('RFR_4', DATATYPES['RFR_4'], (0, 1), 'CMP'),
('RFR_5', DATATYPES['RFR_5'], (0, 1), 'CMP'),
('RFR_6', DATATYPES['RFR_6'], (0, 1), 'CMP'),
('RFR_7', DATATYPES['RFR_7'], (0, 1), 'CMP'),),
'RI': (
('RI_1', DATATYPES['RI_1'], (0, 1), 'CMP'),
('RI_2', DATATYPES['RI_2'], (0, 1), 'CMP'),),
'RMC': (
('RMC_1', DATATYPES['RMC_1'], (0, 1), 'CMP'),
('RMC_2', DATATYPES['RMC_2'], (0, 1), 'CMP'),
('RMC_3', DATATYPES['RMC_3'], (0, 1), 'CMP'),),
'RP': (
('RP_1', DATATYPES['RP_1'], (0, 1), 'CMP'),
('RP_2', DATATYPES['RP_2'], (0, 1), 'CMP'),
('RP_3', DATATYPES['RP_3'], (0, 1), 'CMP'),
('RP_4', DATATYPES['RP_4'], (0, 1), 'CMP'),),
'SAD': (
('SAD_1', DATATYPES['SAD_1'], (0, 1), 'CMP'),
('SAD_2', DATATYPES['SAD_2'], (0, 1), 'CMP'),
('SAD_3', DATATYPES['SAD_3'], (0, 1), 'CMP'),),
'SCV': (
('SCV_1', DATATYPES['SCV_1'], (0, 1), 'CMP'),
('SCV_2', DATATYPES['SCV_2'], (0, 1), 'CMP'),),
'SN': (
('SN_1', DATATYPES['SN_1'], (0, 1), 'CMP'),
('SN_2', DATATYPES['SN_2'], (0, 1), 'CMP'),
('SN_3', DATATYPES['SN_3'], (0, 1), 'CMP'),
('SN_4', DATATYPES['SN_4'], (0, 1), 'CMP'),),
'SPD': (
('SPD_1', DATATYPES['SPD_1'], (0, 1), 'CMP'),
('SPD_2', DATATYPES['SPD_2'], (0, 1), 'CMP'),
('SPD_3', DATATYPES['SPD_3'], (0, 1), 'CMP'),
('SPD_4', DATATYPES['SPD_4'], (0, 1), 'CMP'),),
'SPS': (
('SPS_1', DATATYPES['SPS_1'], (0, 1), 'CMP'),
('SPS_2', DATATYPES['SPS_2'], (0, 1), 'CMP'),
('SPS_3', DATATYPES['SPS_3'], (0, 1), 'CMP'),
('SPS_4', DATATYPES['SPS_4'], (0, 1), 'CMP'),
('SPS_5', DATATYPES['SPS_5'], (0, 1), 'CMP'),
('SPS_6', DATATYPES['SPS_6'], (0, 1), 'CMP'),
('SPS_7', DATATYPES['SPS_7'], (0, 1), 'CMP'),),
'SRT': (
('SRT_1', DATATYPES['SRT_1'], (0, 1), 'CMP'),
('SRT_2', DATATYPES['SRT_2'], (0, 1), 'CMP'),),
'TQ': (
('TQ_1', DATATYPES['TQ_1'], (0, 1), 'CMP'),
('TQ_2', DATATYPES['TQ_2'], (0, 1), 'CMP'),
('TQ_3', DATATYPES['TQ_3'], (0, 1), 'CMP'),
('TQ_4', DATATYPES['TQ_4'], (0, 1), 'CMP'),
('TQ_5', DATATYPES['TQ_5'], (0, 1), 'CMP'),
('TQ_6', DATATYPES['TQ_6'], (0, 1), 'CMP'),
('TQ_7', DATATYPES['TQ_7'], (0, 1), 'CMP'),
('TQ_8', DATATYPES['TQ_8'], (0, 1), 'CMP'),
('TQ_9', DATATYPES['TQ_9'], (0, 1), 'CMP'),
('TQ_10', DATATYPES['TQ_10'], (0, 1), 'CMP'),
('TQ_11', DATATYPES['TQ_11'], (0, 1), 'CMP'),
('TQ_12', DATATYPES['TQ_12'], (0, 1), 'CMP'),),
'TS': (
('TS_1', DATATYPES['TS_1'], (0, 1), 'CMP'),
('TS_2', DATATYPES['TS_2'], (0, 1), 'CMP'),),
'TX_CHALLENGE': (
('TX_CHALLENGE_1', DATATYPES['TX_CHALLENGE_1'], (0, 1), 'CMP'),
('TX_CHALLENGE_2', DATATYPES['TX_CHALLENGE_2'], (0, 1), 'CMP'),),
'UVC': (
('UVC_1', DATATYPES['UVC_1'], (0, 1), 'CMP'),
('UVC_2', DATATYPES['UVC_2'], (0, 1), 'CMP'),),
'VH': (
('VH_1', DATATYPES['VH_1'], (0, 1), 'CMP'),
('VH_2', DATATYPES['VH_2'], (0, 1), 'CMP'),
('VH_3', DATATYPES['VH_3'], (0, 1), 'CMP'),
('VH_4', DATATYPES['VH_4'], (0, 1), 'CMP'),),
'VID': (
('VID_1', DATATYPES['VID_1'], (0, 1), 'CMP'),
('VID_2', DATATYPES['VID_2'], (0, 1), 'CMP'),
('VID_3', DATATYPES['VID_3'], (0, 1), 'CMP'),),
'VR': (
('VR_1', DATATYPES['VR_1'], (0, 1), 'CMP'),
('VR_2', DATATYPES['VR_2'], (0, 1), 'CMP'),),
'WVI': (
('WVI_1', DATATYPES['WVI_1'], (0, 1), 'CMP'),
('WVI_2', DATATYPES['WVI_2'], (0, 1), 'CMP'),),
'WVS': (
('WVS_1', DATATYPES['WVS_1'], (0, 1), 'CMP'),
('WVS_2', DATATYPES['WVS_2'], (0, 1), 'CMP'),),
'XAD': (
('XAD_1', DATATYPES['XAD_1'], (0, 1), 'CMP'),
('XAD_2', DATATYPES['XAD_2'], (0, 1), 'CMP'),
('XAD_3', DATATYPES['XAD_3'], (0, 1), 'CMP'),
('XAD_4', DATATYPES['XAD_4'], (0, 1), 'CMP'),
('XAD_5', DATATYPES['XAD_5'], (0, 1), 'CMP'),
('XAD_6', DATATYPES['XAD_6'], (0, 1), 'CMP'),
('XAD_7', DATATYPES['XAD_7'], (0, 1), 'CMP'),
('XAD_8', DATATYPES['XAD_8'], (0, 1), 'CMP'),
('XAD_9', DATATYPES['XAD_9'], (0, 1), 'CMP'),
('XAD_10', DATATYPES['XAD_10'], (0, 1), 'CMP'),
('XAD_11', DATATYPES['XAD_11'], (0, 1), 'CMP'),
('XAD_12', DATATYPES['XAD_12'], (0, 1), 'CMP'),),
'XCN': (
('XCN_1', DATATYPES['XCN_1'], (0, 1), 'CMP'),
('XCN_2', DATATYPES['XCN_2'], (0, 1), 'CMP'),
('XCN_3', DATATYPES['XCN_3'], (0, 1), 'CMP'),
('XCN_4', DATATYPES['XCN_4'], (0, 1), 'CMP'),
('XCN_5', DATATYPES['XCN_5'], (0, 1), 'CMP'),
('XCN_6', DATATYPES['XCN_6'], (0, 1), 'CMP'),
('XCN_7', DATATYPES['XCN_7'], (0, 1), 'CMP'),
('XCN_8', DATATYPES['XCN_8'], (0, 1), 'CMP'),
('XCN_9', DATATYPES['XCN_9'], (0, 1), 'CMP'),
('XCN_10', DATATYPES['XCN_10'], (0, 1), 'CMP'),
('XCN_11', DATATYPES['XCN_11'], (0, 1), 'CMP'),
('XCN_12', DATATYPES['XCN_12'], (0, 1), 'CMP'),
('XCN_13', DATATYPES['XCN_13'], (0, 1), 'CMP'),
('XCN_14', DATATYPES['XCN_14'], (0, 1), 'CMP'),
('XCN_15', DATATYPES['XCN_15'], (0, 1), 'CMP'),
('XCN_16', DATATYPES['XCN_16'], (0, 1), 'CMP'),
('XCN_17', DATATYPES['XCN_17'], (0, 1), 'CMP'),
('XCN_18', DATATYPES['XCN_18'], (0, 1), 'CMP'),),
'XON': (
('XON_1', DATATYPES['XON_1'], (0, 1), 'CMP'),
('XON_2', DATATYPES['XON_2'], (0, 1), 'CMP'),
('XON_3', DATATYPES['XON_3'], (0, 1), 'CMP'),
('XON_4', DATATYPES['XON_4'], (0, 1), 'CMP'),
('XON_5', DATATYPES['XON_5'], (0, 1), 'CMP'),
('XON_6', DATATYPES['XON_6'], (0, 1), 'CMP'),
('XON_7', DATATYPES['XON_7'], (0, 1), 'CMP'),
('XON_8', DATATYPES['XON_8'], (0, 1), 'CMP'),
('XON_9', DATATYPES['XON_9'], (0, 1), 'CMP'),),
'XPN': (
('XPN_1', DATATYPES['XPN_1'], (0, 1), 'CMP'),
('XPN_2', DATATYPES['XPN_2'], (0, 1), 'CMP'),
('XPN_3', DATATYPES['XPN_3'], (0, 1), 'CMP'),
('XPN_4', DATATYPES['XPN_4'], (0, 1), 'CMP'),
('XPN_5', DATATYPES['XPN_5'], (0, 1), 'CMP'),
('XPN_6', DATATYPES['XPN_6'], (0, 1), 'CMP'),
('XPN_7', DATATYPES['XPN_7'], (0, 1), 'CMP'),
('XPN_8', DATATYPES['XPN_8'], (0, 1), 'CMP'),
('XPN_9', DATATYPES['XPN_9'], (0, 1), 'CMP'),
('XPN_10', DATATYPES['XPN_10'], (0, 1), 'CMP'),
('XPN_11', DATATYPES['XPN_11'], (0, 1), 'CMP'),),
'XTN': (
('XTN_1', DATATYPES['XTN_1'], (0, 1), 'CMP'),
('XTN_2', DATATYPES['XTN_2'], (0, 1), 'CMP'),
('XTN_3', DATATYPES['XTN_3'], (0, 1), 'CMP'),
('XTN_4', DATATYPES['XTN_4'], (0, 1), 'CMP'),
('XTN_5', DATATYPES['XTN_5'], (0, 1), 'CMP'),
('XTN_6', DATATYPES['XTN_6'], (0, 1), 'CMP'),
('XTN_7', DATATYPES['XTN_7'], (0, 1), 'CMP'),
('XTN_8', DATATYPES['XTN_8'], (0, 1), 'CMP'),
('XTN_9', DATATYPES['XTN_9'], (0, 1), 'CMP'),),
}
for k, v in iteritems(DATATYPES):
if v[0] == 'sequence':
v[1] = DATATYPES_STRUCTS[v[2]]
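# Illustrative note (not part of the original table): the loop above replaces
# the second element of every 'sequence' entry in DATATYPES with the component
# tuple looked up by its third element, so composite datatypes end up pointing
# at their (name, datatype, cardinality, 'CMP') component definitions. A
# hypothetical helper for enumerating those component names could look like:
def _component_names(datatype_name):
    """Return the component names of a composite datatype, e.g. 'CX' yields
    ['CX_1', ..., 'CX_8']; leaf datatypes have no entry here and yield []."""
    return [component[0] for component in DATATYPES_STRUCTS.get(datatype_name, ())]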
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import six
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
from tensorflow.python.platform import tf_logging as logging
def _is_sequence(seq):
return (isinstance(seq, collections.Sequence)
and not isinstance(seq, six.string_types))
def _packed_state_with_indices(structure, flat, index):
"""Helper function for _packed_state.
Args:
structure: Substructure (tuple of elements and/or tuples) to mimic
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
    The tuple (new_index, packed), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in structure:
if _is_sequence(s):
new_index, child = _packed_state_with_indices(s, flat, index)
packed.append(type(s)(child))
index = new_index
else:
packed.append(flat[index])
index += 1
return (index, packed)
def _yield_unpacked_state(state):
for s in state:
if _is_sequence(s):
for si in _yield_unpacked_state(s):
yield si
else:
yield s
def _unpacked_state(state):
if not _is_sequence(state):
raise TypeError("state must be a sequence")
return type(state)(_yield_unpacked_state(state))
def _packed_state(structure, state):
"""Returns the flat state packed into a recursive tuple like structure.
Args:
structure: tuple or list constructed of scalars and/or other tuples/lists.
state: flattened state.
Returns:
packed: `state` converted to have the same recursive structure as
`structure`.
Raises:
TypeError: If structure or state is not a tuple or list.
ValueError: If state and structure have different element counts.
"""
if not _is_sequence(structure):
raise TypeError("structure must be a sequence")
if not _is_sequence(state):
raise TypeError("state must be a sequence")
flat_structure = _unpacked_state(structure)
if len(flat_structure) != len(state):
raise ValueError(
"Internal error: Could not pack state. Structure had %d elements, but "
"state had %d elements. Structure: %s, state: %s."
% (len(flat_structure), len(state), structure, state))
(_, packed) = _packed_state_with_indices(structure, state, 0)
return type(structure)(packed)
class RNNCell(object):
"""Abstract object representing an RNN cell.
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
If `self.state_size` is an integer, this operation also results in a new
state matrix with `self.state_size` columns. If `self.state_size` is a
tuple of integers, then it results in a tuple of `len(state_size)` state
  matrices, each with a column size corresponding to values in `state_size`.
This module provides a number of basic commonly used RNN cells, such as
LSTM (Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number
  of operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`,
or by calling the `rnn` ops several times. Every `RNNCell` must have the
  properties below and implement `__call__` with the following signature.
"""
def __call__(self, inputs, state, scope=None):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: `2-D` tensor with shape `[batch_size x input_size]`.
state: if `self.state_size` is an integer, this should be a `2-D Tensor`
with shape `[batch_size x self.state_size]`. Otherwise, if
`self.state_size` is a tuple of integers, this should be a tuple
with shapes `[batch_size x s] for s in self.state_size`.
scope: VariableScope for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size x self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors matching
the arity and shapes of `state`.
"""
raise NotImplementedError("Abstract method")
@property
def state_size(self):
"""Integer or tuple of integers: size(s) of state(s) used by this cell."""
raise NotImplementedError("Abstract method")
@property
def output_size(self):
"""Integer: size of outputs produced by this cell."""
raise NotImplementedError("Abstract method")
def zero_state(self, batch_size, dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
If `state_size` is an int, then the return value is a `2-D` tensor of
shape `[batch_size x state_size]` filled with zeros.
If `state_size` is a nested list or tuple, then the return value is
a nested list or tuple (of the same structure) of `2-D` tensors with
the shapes `[batch_size x s]` for each s in `state_size`.
"""
state_size = self.state_size
if _is_sequence(state_size):
state_size_flat = _unpacked_state(state_size)
zeros_flat = [
array_ops.zeros(array_ops.pack([batch_size, s]), dtype=dtype)
for s in state_size_flat]
for s, z in zip(state_size_flat, zeros_flat):
z.set_shape([None, s])
zeros = _packed_state(structure=state_size, state=zeros_flat)
else:
zeros = array_ops.zeros(
array_ops.pack([batch_size, state_size]), dtype=dtype)
zeros.set_shape([None, state_size])
return zeros
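# Interface sketch (illustrative, based on the docstrings above): for any
# concrete cell `c`, a single step is
#     output, new_state = c(inputs, state)
# where `inputs` is `[batch_size x input_size]`, `output` is
# `[batch_size x c.output_size]`, and `new_state` matches `c.state_size`
# (one tensor when `state_size` is an int, a tuple of tensors otherwise).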
class BasicRNNCell(RNNCell):
"""The most basic RNN cell."""
def __init__(self, num_units, input_size=None, activation=tanh):
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated." % self)
self._num_units = num_units
self._activation = activation
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
with vs.variable_scope(scope or type(self).__name__): # "BasicRNNCell"
output = self._activation(_linear([inputs, state], self._num_units, True))
return output, output
class GRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""
def __init__(self, num_units, input_size=None, activation=tanh):
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated." % self)
self._num_units = num_units
self._activation = activation
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Gated recurrent unit (GRU) with nunits cells."""
with vs.variable_scope(scope or type(self).__name__): # "GRUCell"
with vs.variable_scope("Gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not update.
r, u = array_ops.split(1, 2, _linear([inputs, state],
2 * self._num_units, True, 1.0))
r, u = sigmoid(r), sigmoid(u)
with vs.variable_scope("Candidate"):
c = self._activation(_linear([inputs, r * state],
self._num_units, True))
new_h = u * state + (1 - u) * c
return new_h, new_h
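# Update equations implemented by GRUCell.__call__ above (illustrative
# notation; the W_* and b_* terms are the variables created by _linear):
#     r = sigmoid(W_r [x, h] + b_r)         # reset gate, bias starts at 1.0
#     u = sigmoid(W_u [x, h] + b_u)         # update gate, bias starts at 1.0
#     c = activation(W_c [x, r * h] + b_c)  # candidate state
#     h' = u * h + (1 - u) * c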
class BasicLSTMCell(RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
For advanced models, please use the full LSTMCell that follows.
"""
def __init__(self, num_units, forget_bias=1.0, input_size=None,
state_is_tuple=False, activation=tanh):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
input_size: Deprecated and unused.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. By default (False), they are concatenated
along the column axis. This default behavior will soon be deprecated.
activation: Activation function of the inner states.
"""
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True." % self)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated." % self)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
@property
def state_size(self):
return ((self._num_units, self._num_units) if self._state_is_tuple
else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell (LSTM)."""
with vs.variable_scope(scope or type(self).__name__): # "BasicLSTMCell"
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(1, 2, state)
concat = _linear([inputs, h], 4 * self._num_units, True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(1, 4, concat)
new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
self._activation(j))
new_h = self._activation(new_c) * sigmoid(o)
if self._state_is_tuple:
new_state = (new_c, new_h)
else:
new_state = array_ops.concat(1, [new_c, new_h])
return new_h, new_state
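# State layout sketch (illustrative): with num_units = 3 and
# state_is_tuple=False, the state is a single [batch x 6] tensor holding
# c and h concatenated along the column axis; with state_is_tuple=True it
# is the pair (c, h), each of shape [batch x 3].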
def _get_concat_variable(name, shape, dtype, num_shards):
"""Get a sharded variable concatenated into one tensor."""
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
if len(sharded_variable) == 1:
return sharded_variable[0]
concat_name = name + "/concat"
concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
if value.name == concat_full_name:
return value
concat_variable = array_ops.concat(0, sharded_variable, name=concat_name)
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
concat_variable)
return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
"""Get a list of sharded variables with the given dtype."""
if num_shards > shape[0]:
raise ValueError("Too many shards: shape=%s, num_shards=%d" %
(shape, num_shards))
unit_shard_size = int(math.floor(shape[0] / num_shards))
remaining_rows = shape[0] - unit_shard_size * num_shards
shards = []
for i in range(num_shards):
current_size = unit_shard_size
if i < remaining_rows:
current_size += 1
shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
dtype=dtype))
return shards
class LSTMCell(RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
"""
def __init__(self, num_units, input_size=None,
use_peepholes=False, cell_clip=None,
initializer=None, num_proj=None,
num_unit_shards=1, num_proj_shards=1,
forget_bias=1.0, state_is_tuple=False,
activation=tanh):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
input_size: Deprecated and unused.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
num_unit_shards: How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
num_proj_shards: How to split the projection matrix. If >1, the
projection matrix is stored across num_proj_shards.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. By default (False), they are concatenated
along the column axis. This default behavior will soon be deprecated.
activation: Activation function of the inner states.
"""
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True." % self)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated." % self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
if num_proj:
self._state_size = (
(num_units, num_proj) if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
(num_units, num_units) if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
scope: VariableScope for the created subgraph; defaults to "LSTMCell".
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
num_proj = self._num_units if self._num_proj is None else self._num_proj
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(scope or type(self).__name__,
initializer=self._initializer): # "LSTMCell"
concat_w = _get_concat_variable(
"W", [input_size.value + num_proj, 4 * self._num_units],
dtype, self._num_unit_shards)
b = vs.get_variable(
"B", shape=[4 * self._num_units],
initializer=array_ops.zeros_initializer, dtype=dtype)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat(1, [inputs, m_prev])
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
i, j, f, o = array_ops.split(1, 4, lstm_matrix)
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"W_I_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * self._activation(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
concat_w_proj = _get_concat_variable(
"W_P", [self._num_units, self._num_proj],
dtype, self._num_proj_shards)
m = math_ops.matmul(m, concat_w_proj)
new_state = (c, m) if self._state_is_tuple else array_ops.concat(1, [c, m])
return m, new_state
class OutputProjectionWrapper(RNNCell):
"""Operator adding an output projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your outputs in time,
do the projection on this batch-concatenated sequence, then split it
if needed or directly feed into a softmax.
"""
def __init__(self, cell, output_size):
"""Create a cell with output projection.
Args:
cell: an RNNCell, a projection to output_size is added to it.
output_size: integer, the size of the output after projection.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if output_size is not positive.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if output_size < 1:
raise ValueError("Parameter output_size must be > 0: %d." % output_size)
self._cell = cell
self._output_size = output_size
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell and output projection on inputs, starting from state."""
output, res_state = self._cell(inputs, state)
# Default scope: "OutputProjectionWrapper"
with vs.variable_scope(scope or type(self).__name__):
projected = _linear(output, self._output_size, True)
return projected, res_state
class InputProjectionWrapper(RNNCell):
"""Operator adding an input projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the projection on this batch-concatenated sequence, then split it.
"""
def __init__(self, cell, num_proj, input_size=None):
"""Create a cell with input projection.
Args:
cell: an RNNCell, a projection of inputs is added before it.
num_proj: Python integer. The dimension to project to.
input_size: Deprecated and unused.
Raises:
TypeError: if cell is not an RNNCell.
"""
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated." % self)
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
self._cell = cell
self._num_proj = num_proj
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the input projection and then the cell."""
# Default scope: "InputProjectionWrapper"
with vs.variable_scope(scope or type(self).__name__):
projected = _linear(inputs, self._num_proj, True)
return self._cell(projected, state)
class DropoutWrapper(RNNCell):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
seed=None):
"""Create a cell with added input and/or output dropout.
Dropout is never used on the state.
Args:
      cell: an RNNCell, dropout will be added to its inputs and/or outputs.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is float and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is float and 1, no output dropout will be added.
seed: (optional) integer, the randomness seed.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if keep_prob is not between 0 and 1.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not a RNNCell.")
if (isinstance(input_keep_prob, float) and
not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
% input_keep_prob)
if (isinstance(output_keep_prob, float) and
not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
% output_keep_prob)
self._cell = cell
self._input_keep_prob = input_keep_prob
self._output_keep_prob = output_keep_prob
self._seed = seed
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell with the declared dropouts."""
if (not isinstance(self._input_keep_prob, float) or
self._input_keep_prob < 1):
inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
output, new_state = self._cell(inputs, state)
if (not isinstance(self._output_keep_prob, float) or
self._output_keep_prob < 1):
output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
return output, new_state
class EmbeddingWrapper(RNNCell):
"""Operator adding input embedding to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the embedding on this batch-concatenated sequence, then split it and
feed into your RNN.
"""
def __init__(self, cell, embedding_classes, embedding_size, initializer=None):
"""Create a cell with an added input embedding.
Args:
cell: an RNNCell, an embedding will be put before its inputs.
embedding_classes: integer, how many symbols will be embedded.
embedding_size: integer, the size of the vectors we embed into.
initializer: an initializer to use when creating the embedding;
if None, the initializer from variable scope or a default one is used.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if embedding_classes is not positive.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if embedding_classes <= 0 or embedding_size <= 0:
raise ValueError("Both embedding_classes and embedding_size must be > 0: "
"%d, %d." % (embedding_classes, embedding_size))
self._cell = cell
self._embedding_classes = embedding_classes
self._embedding_size = embedding_size
self._initializer = initializer
@property
def state_size(self):
return self._cell.state_size
def __call__(self, inputs, state, scope=None):
"""Run the cell on embedded inputs."""
with vs.variable_scope(scope or type(self).__name__): # "EmbeddingWrapper"
with ops.device("/cpu:0"):
if self._initializer:
initializer = self._initializer
elif vs.get_variable_scope().initializer:
initializer = vs.get_variable_scope().initializer
else:
# Default initializer for embeddings should have variance=1.
sqrt3 = math.sqrt(3) # Uniform(-sqrt(3), sqrt(3)) has variance=1.
initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
embedding = vs.get_variable("embedding", [self._embedding_classes,
self._embedding_size],
initializer=initializer)
embedded = embedding_ops.embedding_lookup(
embedding, array_ops.reshape(inputs, [-1]))
return self._cell(embedded, state)
class MultiRNNCell(RNNCell):
"""RNN cell composed sequentially of multiple simple cells."""
def __init__(self, cells, state_is_tuple=False):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. By default (False), the states are all
concatenated along the column axis.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`.
"""
if not cells:
raise ValueError("Must specify at least one cell for MultiRNNCell.")
self._cells = cells
self._state_is_tuple = state_is_tuple
if not state_is_tuple:
if any(_is_sequence(c.state_size) for c in self._cells):
raise ValueError("Some cells return tuples of states, but the flag "
"state_is_tuple is not set. State sizes are: %s"
% str([c.state_size for c in self._cells]))
@property
def state_size(self):
if self._state_is_tuple:
return tuple(cell.state_size for cell in self._cells)
else:
return sum([cell.state_size for cell in self._cells])
@property
def output_size(self):
return self._cells[-1].output_size
def __call__(self, inputs, state, scope=None):
"""Run this multi-layer cell on inputs, starting from state."""
with vs.variable_scope(scope or type(self).__name__): # "MultiRNNCell"
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with vs.variable_scope("Cell%d" % i):
if self._state_is_tuple:
if not _is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s"
% (len(self.state_size), state))
cur_state = state[i]
else:
cur_state = array_ops.slice(
state, [0, cur_state_pos], [-1, cell.state_size])
cur_state_pos += cell.state_size
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
new_states = (tuple(new_states) if self._state_is_tuple
else array_ops.concat(1, new_states))
return cur_inp, new_states
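# Minimal composition sketch (illustrative, not part of the original module):
# it only uses classes defined above. `inputs` is assumed to be any
# [batch_size x input_size] float Tensor; nothing here runs at import time.
def _example_stacked_gru(inputs, batch_size, dtype):
  """Builds a two-layer GRU stack with output dropout and runs one step."""
  cell = MultiRNNCell(
      [DropoutWrapper(GRUCell(64), output_keep_prob=0.9) for _ in range(2)],
      state_is_tuple=True)
  state = cell.zero_state(batch_size, dtype)
  output, new_state = cell(inputs, state)  # output: [batch_size x 64]
  return output, new_state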
class SlimRNNCell(RNNCell):
"""A simple wrapper for slim.rnn_cells."""
def __init__(self, cell_fn):
"""Create a SlimRNNCell from a cell_fn.
Args:
cell_fn: a function which takes (inputs, state, scope) and produces the
outputs and the new_state. Additionally when called with inputs=None and
state=None it should return (initial_outputs, initial_state).
Raises:
TypeError: if cell_fn is not callable
ValueError: if cell_fn cannot produce a valid initial state.
"""
if not callable(cell_fn):
raise TypeError("cell_fn %s needs to be callable", cell_fn)
self._cell_fn = cell_fn
self._cell_name = cell_fn.func.__name__
init_output, init_state = self._cell_fn(None, None)
output_shape = init_output.get_shape()
state_shape = init_state.get_shape()
self._output_size = output_shape.with_rank(2)[1].value
self._state_size = state_shape.with_rank(2)[1].value
if self._output_size is None:
raise ValueError("Initial output created by %s has invalid shape %s" %
(self._cell_name, output_shape))
if self._state_size is None:
raise ValueError("Initial state created by %s has invalid shape %s" %
(self._cell_name, state_shape))
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
scope = scope or self._cell_name
output, state = self._cell_fn(inputs, state, scope=scope)
return output, state
def _linear(args, output_size, bias, bias_start=0.0, scope=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (_is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not _is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
# Now the computation.
with vs.variable_scope(scope or "Linear"):
matrix = vs.get_variable("Matrix", [total_arg_size, output_size])
if len(args) == 1:
res = math_ops.matmul(args[0], matrix)
else:
res = math_ops.matmul(array_ops.concat(1, args), matrix)
if not bias:
return res
bias_term = vs.get_variable(
"Bias", [output_size],
initializer=init_ops.constant_initializer(bias_start))
return res + bias_term
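# Shape sketch for _linear (illustrative): with args = [x, h], x of shape
# [batch x 10], h of shape [batch x 32] and output_size = 64, the created
# "Matrix" variable is [42 x 64] and the result is [batch x 64], i.e.
# matmul(concat(1, [x, h]), Matrix) plus "Bias" when bias=True.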
|
__author__ = 'vikesh'
"""
~wiki <query | topic> returns a wiki link for that <query>
"""
import re
try:
from urllib import quote
except ImportError:
    from urllib.parse import quote  # Python 3
import requests
from bs4 import BeautifulSoup
def return_wiki(search_term):
search_term = quote(search_term)
url = "https://en.wikipedia.org/w/api.php?action=query&list=search&srsearch={0}&format=json"
url = url.format(search_term)
result = requests.get(url).json()
    pages = result["query"]["search"]
    # Skip disambiguation pages; the API returns the match context in "snippet".
    pages = [p for p in pages if 'may refer to' not in p["snippet"]]
    if not pages:
        return ""
    title = quote(pages[0]["title"].encode("utf-8"))
    link = "https://en.wikipedia.org/wiki/{0}".format(title)
    res = requests.get("https://en.wikipedia.org/w/api.php?format=json&action=parse&page={0}".format(title)).json()
soup = BeautifulSoup(res["parse"]["text"]["*"], "html5lib")
p = soup.find('p').get_text()
p = p[:8000]
return u"{0}\n{1}".format(p, link)
def on_message(msg, server):
text = msg.get("text","")
match = re.findall(r"~wiki (.*)", text)
if not match:
return
search_term = match[0]
return return_wiki(search_term.encode("utf-8"))
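# Usage sketch (illustrative): the plugin reacts to messages of the form
# "~wiki <query>", e.g.
#     on_message({"text": "~wiki Alan Turing"}, None)
# returns the first paragraph of the best-matching article followed by its
# URL, an empty string when no page is found, and None when the message
# does not contain "~wiki" at all.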
|
import mimetypes
import os
from email import (
charset as Charset, encoders as Encoders, generator, message_from_string,
)
from email.errors import InvalidHeaderDefect, NonASCIILocalPartDefect
from email.header import Header
from email.headerregistry import Address
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, getaddresses, make_msgid, parseaddr
from io import BytesIO, StringIO
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_text
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None # Python defaults to BASE64
utf8_charset_qp = Charset.Charset('utf-8')
utf8_charset_qp.body_encoding = Charset.QP
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
RFC5322_EMAIL_LINE_LENGTH_LIMIT = 998
class BadHeaderError(ValueError):
pass
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = {
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = force_text(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ', '.join(sanitize_address(addr, encoding) for addr in getaddresses((val,)))
else:
val = Header(val, encoding).encode()
else:
if name.lower() == 'subject':
val = Header(val).encode()
return name, val
def split_addr(addr, encoding):
"""
Split the address into local part and domain, properly encoded.
When non-ascii characters are present in the local part, it must be
MIME-word encoded. The domain name must be idna-encoded if it contains
non-ascii characters.
"""
if '@' in addr:
localpart, domain = addr.split('@', 1)
# Try to get the simplest encoding - ascii if possible so that
# [email protected] doesn't become [email protected]. This
# makes unit testing a bit easier and more readable.
try:
localpart.encode('ascii')
except UnicodeEncodeError:
localpart = Header(localpart, encoding).encode()
domain = domain.encode('idna').decode('ascii')
else:
localpart = Header(addr, encoding).encode()
domain = ''
return (localpart, domain)
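# Behaviour sketch (illustrative): '[email protected]' is split into
# ('to', 'example.com') unchanged; a non-ASCII local part becomes a MIME
# encoded-word such as '=?utf-8?...?=', and a non-ASCII domain is converted
# to its IDNA ('xn--...') form.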
def sanitize_address(addr, encoding):
"""
Format a pair of (name, address) or an email address string.
"""
if not isinstance(addr, tuple):
addr = parseaddr(force_text(addr))
nm, addr = addr
localpart, domain = None, None
nm = Header(nm, encoding).encode()
try:
addr.encode('ascii')
except UnicodeEncodeError: # IDN or non-ascii in the local part
localpart, domain = split_addr(addr, encoding)
# An `email.headerregistry.Address` object is used since
# email.utils.formataddr() naively encodes the name as ascii (see #25986).
if localpart and domain:
address = Address(nm, username=localpart, domain=domain)
return str(address)
try:
address = Address(nm, addr_spec=addr)
except (InvalidHeaderDefect, NonASCIILocalPartDefect):
localpart, domain = split_addr(addr, encoding)
address = Address(nm, username=localpart, domain=domain)
return str(address)
class MIMEMixin():
def as_string(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as a string.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_string() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = StringIO()
g = generator.Generator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
def as_bytes(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as bytes.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_bytes() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = BytesIO()
g = generator.BytesGenerator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
def __setitem__(self, name, val):
# message/rfc822 attachments must be ASCII
name, val = forbid_multi_line_headers(name, val, 'ascii')
MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
def __init__(self, _text, _subtype='plain', _charset=None):
self.encoding = _charset
MIMEText.__init__(self, _text, _subtype=_subtype, _charset=_charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
def set_payload(self, payload, charset=None):
if charset == 'utf-8':
has_long_lines = any(
len(l.encode('utf-8')) > RFC5322_EMAIL_LINE_LENGTH_LIMIT
for l in payload.splitlines()
)
# Quoted-Printable encoding has the side effect of shortening long
# lines, if any (#22561).
charset = utf8_charset_qp if has_long_lines else utf8_charset
MIMEText.set_payload(self, payload, charset=charset)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage:
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None,
reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
if isinstance(to, str):
raise TypeError('"to" argument must be a list or tuple')
self.to = list(to)
else:
self.to = []
if cc:
if isinstance(cc, str):
raise TypeError('"cc" argument must be a list or tuple')
self.cc = list(cc)
else:
self.cc = []
if bcc:
if isinstance(bcc, str):
raise TypeError('"bcc" argument must be a list or tuple')
self.bcc = list(bcc)
else:
self.bcc = []
if reply_to:
if isinstance(reply_to, str):
raise TypeError('"reply_to" argument must be a list or tuple')
self.reply_to = list(reply_to)
else:
self.reply_to = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(map(force_text, self.to)))
if self.cc:
msg['Cc'] = ', '.join(map(force_text, self.cc))
if self.reply_to:
msg['Reply-To'] = self.extra_headers.get('Reply-To', ', '.join(map(force_text, self.reply_to)))
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
# formatdate() uses stdlib methods to format the date, which use
# the stdlib/OS concept of a timezone, however, Django sets the
# TZ environment variable based on the TIME_ZONE setting which
# will get picked up by formatdate().
msg['Date'] = formatdate(localtime=settings.EMAIL_USE_LOCALTIME)
if 'message-id' not in header_names:
# Use cached DNS_NAME for performance
msg['Message-ID'] = make_msgid(domain=DNS_NAME)
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return [email for email in (self.to + self.cc + self.bcc) if email]
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
For a text/* mimetype (guessed or specified), when a bytes object is
specified as content, it will be decoded as UTF-8. If that fails,
the mimetype will be set to DEFAULT_ATTACHMENT_MIME_TYPE and the
content is not decoded.
"""
if isinstance(filename, MIMEBase):
assert content is None
assert mimetype is None
self.attachments.append(filename)
else:
assert content is not None
if not mimetype:
mimetype, _ = mimetypes.guess_type(filename)
if not mimetype:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
if isinstance(content, bytes):
try:
content = content.decode('utf-8')
except UnicodeDecodeError:
# If mimetype suggests the file is text but it's actually
# binary, read() will raise a UnicodeDecodeError on Python 3.
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""
Attaches a file from the filesystem.
The mimetype will be set to the DEFAULT_ATTACHMENT_MIME_TYPE if it is
not specified and cannot be guessed.
For a text/* mimetype (guessed or specified), the file's content
will be decoded as UTF-8. If that fails, the mimetype will be set to
DEFAULT_ATTACHMENT_MIME_TYPE and the content is not decoded.
"""
filename = os.path.basename(path)
with open(path, 'rb') as file:
content = file.read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
If the mimetype is message/rfc822, content may be an
email.Message or EmailMessage object, as well as a str.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(content, subtype, encoding)
elif basetype == 'message' and subtype == 'rfc822':
# Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
# must not be base64 encoded.
if isinstance(content, EmailMessage):
# convert content into an email.Message first
content = content.message()
elif not isinstance(content, Message):
# For compatibility with existing code, parse the message
# into an email.Message object if it is not one already.
content = message_from_string(content)
attachment = SafeMIMEMessage(content, subtype)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode('ascii')
except UnicodeEncodeError:
filename = ('utf-8', '', filename)
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None,
cc=None, reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super(EmailMultiAlternatives, self).__init__(
subject, body, from_email, to, bcc, connection, attachments,
headers, cc, reply_to,
)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
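# --- Usage sketch (illustrative, not part of Django itself): how the classes above
# are typically combined. The addresses and filenames are placeholders and nothing
# here runs at import time.
def _example_send_multipart_email():
    msg = EmailMultiAlternatives(
        subject='Weekly report',
        body='Plain-text fallback body.',
        from_email='[email protected]',
        to=['[email protected]'],
        reply_to=['[email protected]'],
    )
    # The HTML part becomes a sibling of the text body inside multipart/alternative.
    msg.attach_alternative('<h1>Weekly report</h1>', 'text/html')
    # A (filename, content, mimetype) triple becomes a regular attachment.
    msg.attach('report.csv', 'a,b\n1,2\n', 'text/csv')
    # send() builds the MIME message via message() and hands it to the connection.
    return msg.send(fail_silently=False)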
|
"""The WaveBlocks Project
This file contains code for the delegation of the evaluation of homogeneous (or mixing)
inner products of two wavepackets. The class defined here can compute brakets, inner products,
expectation values, and the matrix elements of an arbitrary operator.
@author: R. Bourquin
@copyright: Copyright (C) 2011, 2012, 2013, 2014 R. Bourquin
@license: Modified BSD License
"""
from numpy import zeros, complexfloating, sum, cumsum
from WaveBlocksND.InnerProduct import InnerProduct
__all__ = ["HomogeneousInnerProduct"]
class HomogeneousInnerProduct(InnerProduct):
r"""
"""
def __init__(self, delegate=None):
r"""
This class computes the homogeneous inner product
:math:`\langle\Psi|f|\Psi\rangle`.
:param delegate: The delegate inner product.
:type delegate: A :py:class:`Quadrature` subclass instance.
"""
# Pure convenience to allow setting of quadrature instance in constructor
self.set_delegate(delegate)
def __str__(self):
return "Homogeneous inner product computed by " + str(self._delegate)
def get_description(self):
r"""Return a description of this inner product object.
A description is a ``dict`` containing all key-value pairs
necessary to reconstruct the current instance. A description
never contains any data.
"""
d = {}
d["type"] = "HomogeneousInnerProduct"
d["delegate"] = self._delegate.get_description()
return d
def quadrature(self, packet, operator=None, summed=False, component=None, diag_component=None, diagonal=False, eval_at_once=False):
r"""Delegates the evaluation of :math:`\langle\Psi|f|\Psi\rangle` for a general
function :math:`f(x)` with :math:`x \in \mathbb{R}^D`.
:param packet: The wavepacket :math:`\Psi`.
:param operator: A matrix-valued function :math:`f(x): \mathbb{R}^D \rightarrow \mathbb{R}^{N \times N}`.
:param summed: Whether to sum up the individual integrals :math:`\langle\Phi_i|f_{i,j}|\Phi_j\rangle`.
:type summed: Boolean, default is ``False``.
:param component: Request only the i-th component of the result. Remember that :math:`i \in [0, N^2-1]`.
:param diag_component: Request only the i-th component from the diagonal entries, here :math:`i \in [0, N-1]`.
Note that ``component`` takes precedence over ``diag_component`` if both are supplied. (Which is discouraged)
:param diagonal: Only return the diagonal elements :math:`\langle\Phi_i|f_{i,i}|\Phi_i\rangle`.
This is useful for diagonal operators :math:`f`.
:param eval_at_once: Flag to tell whether the operator supports the ``entry=(r,c)`` call syntax.
:type eval_at_once: Boolean, default is ``False``.
:return: The value of the braket :math:`\langle\Psi|f|\Psi\rangle`. This is either a scalar value or
a list of :math:`N^2` scalar elements depending on the value of ``summed``.
"""
# TODO: Consider adding 'is_diagonal' flag to make computations cheaper if we know the operator is diagonal
self._delegate.initialize_packet(packet)
self._delegate.initialize_operator(operator, eval_at_once=eval_at_once)
N = packet.get_number_components()
# Avoid unnecessary computations of other components
if component is not None:
rows = [component // N]
cols = [component % N]
elif diag_component is not None:
rows = [diag_component]
cols = [diag_component]
else:
rows = range(N)
cols = range(N)
self._delegate.prepare(rows, cols)
# Compute the quadrature
result = []
for row in rows:
for col in cols:
I = self._delegate.perform_quadrature(row, col)
result.append(I)
if summed is True:
result = sum(result)
elif component is not None or diag_component is not None:
# Do not return a list for quadrature of specific single components
result = result[0]
elif diagonal is True:
# Only keep the diagonal elements
result = [result[i * N + i] for i in range(N)]
return result
def build_matrix(self, packet, operator=None, eval_at_once=False):
r"""Delegates the computation of the matrix elements :math:`\langle\Psi|f|\Psi\rangle`
for a general function :math:`f(x)` with :math:`x \in \mathbb{R}^D`.
The matrix is computed without including the coefficients :math:`c^i_k`.
:param packet: The wavepacket :math:`\Psi`.
:param operator: A matrix-valued function :math:`f(q, x): \mathbb{R} \times \mathbb{R}^D \rightarrow \mathbb{R}^{N \times N}`.
:param eval_at_once: Flag to tell whether the operator supports the ``entry=(r,c)`` call syntax.
:type eval_at_once: Boolean, default is ``False``.
:return: A square matrix of size :math:`\sum_i^N |\mathfrak{K}_i| \times \sum_j^N |\mathfrak{K}_j|`.
"""
# TODO: Consider adding 'is_diagonal' flag to make computations cheaper if we know the operator is diagonal
self._delegate.initialize_packet(packet)
self._delegate.initialize_operator(operator, matrix=True, eval_at_once=eval_at_once)
N = packet.get_number_components()
K = [bs.get_basis_size() for bs in packet.get_basis_shapes()]
# The partition scheme of the block vectors and block matrix
partition = [0] + list(cumsum(K))
self._delegate.prepare(range(N), range(N))
# Compute the matrix elements
result = zeros((sum(K), sum(K)), dtype=complexfloating)
for row in range(N):
for col in range(N):
M = self._delegate.perform_build_matrix(row, col)
# Put the result into the global storage
result[partition[row]:partition[row + 1], partition[col]:partition[col + 1]] = M
return result
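# --- Usage sketch (hedged): how the delegation above is typically driven. The
# wavepacket and the delegate quadrature come from the wider WaveBlocksND API and
# are assumptions here; only methods defined in this file are used.
def _example_homogeneous_inner_product(packet, quadrature):
    ip = HomogeneousInnerProduct(quadrature)      # delegate set via the constructor
    parts = ip.quadrature(packet, diagonal=True)  # the N diagonal brakets <Phi_i|Phi_i>
    total = ip.quadrature(packet, summed=True)    # scalar <Psi|Psi>
    matrix = ip.build_matrix(packet)              # full block matrix, without coefficients
    return parts, total, matrix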
|
import numpy as np #Used for managing the matrices
from sklearn import datasets as ds #Used to import the iris dataset
from scipy import optimize #Use the wrapper of the BFGS algorithm in scipy
from matplotlib import pyplot as plt #Plot graphs
np.seterr(all='ignore') #Because numbers can be very small, this hides overflow & /0 errors
iris = ds.load_iris() #Load the data set
#Put the data and labels into an x and y matrix
x = iris.data
y = iris.target
#Then normalize the data into a range between 0 & 1
xM = x.max()
x = x/x.max()
y = y/y.max()
y = np.reshape(y, (150,1)) #Reshape y into a column vector (150, 1) so it matches the network's output shape
class NeuralNetwork(object):
#Initialise the inital values for the NN
def __init__(self):
#Neural Network Model
self.inputSize = 4 #4 Inputs, sepal length/width and petal length/width
self.hiddenSize = 5 #Rounded mean of input & output, we'll see how well it works
self.outputSize = 1 #1 Output to classify which flower it is
#Create the weights randomly into a matrix of the same size as the number of nodes they are connected to
self.W1 = np.random.randn(self.inputSize, self.hiddenSize) #input -> hidden
self.W2 = np.random.randn(self.hiddenSize, self.outputSize) #hidden -> output
    #Predict function - use this after the network is trained by passing an array of the four measurements; the input is scaled by the same factor (xM) used to normalize the training data
    def predict(self, x):
        x = np.array(x)
        prediction = self.forwardProp(x/xM) * 2 #Forward propagate the normalized input, then de-normalize the output (y was divided by its max of 2)
        if prediction < 0.5:
            print("Flower Prediction: Setosa\nNumeric Prediction: "+ str(prediction[0])) #Then print the flower name chosen by simple thresholds, as well as the numeric prediction
elif prediction < 1.5:
print("Flower Prediction: Versicolor\nNumeric Prediction: "+ str(prediction[0]))
elif prediction < 2.5:
print("Flower Prediction: Virginica\nNumeric Prediction: "+ str(prediction[0]))
#Propagate the data forward through the network using sigmoid function as the activation function
def forwardProp(self, x):
self.z2 = np.dot(x, self.W1) #Z's are the dot product of the output from the previous nodes and the weights
self.a2 = self.sigmoid(self.z2) #A and yHat are the z's but with the activation function applied
self.z3 = np.dot(self.a2, self.W2)
self.yHat = self.sigmoid(self.z3)
return self.yHat
#Sigmoid equation for use as the activation function
def sigmoid(self, z):
return 1/(1+np.exp(-z))
#Cost function to work out how wrong we are when training - Used in gradient descent to reduce the cost
#Error = 0.5(target-predicted)^2
def costFunction(self, x, y):
self.yHat = self.forwardProp(x)
J = 0.5*sum((y-self.yHat)**2) #cost function to work out how wrong we were, the difference between the actual and predicted, squared then halved
return J
    #Derivative of the sigmoid function, used in gradient descent as part of getting the overall gradient
def sigmoidDerived(self, z):
return ((np.exp(-z)) / ((1 + np.exp(-z))**2))
    #Derivative of the cost function, also used in gradient descent as part of getting the overall gradient
    #The function also works out how much to change the weights using the delta rule:
    #Change in weight = (target output - predicted) * sigmoid derivative * input
def costFunctionDerived(self, X, y):
self.yHat = self.forwardProp(X)
        #Gradient for the second weight layer W2 (hidden -> output)
        delta3 = np.multiply(-(y-self.yHat), self.sigmoidDerived(self.z3))
        dJdW2 = np.dot(self.a2.T, delta3)
        #Gradient for the first weight layer W1 (input -> hidden), backpropagated through W2
        delta2 = np.dot(delta3, self.W2.T)*self.sigmoidDerived(self.z2)
        dJdW1 = np.dot(X.T, delta2)
return dJdW1, dJdW2
#Combines the 2 weights matrices into one
def getParams(self):
params = np.concatenate((self.W1.ravel(), self.W2.ravel()))
return params
#Reset weights from the new single matrix back into 2 matrices
def setParams(self, params):
W1_start = 0
W1_end = self.hiddenSize * self.inputSize
self.W1 = np.reshape(params[W1_start:W1_end], (self.inputSize , self.hiddenSize))
W2_end = W1_end + self.hiddenSize*self.outputSize
self.W2 = np.reshape(params[W1_end:W2_end], (self.hiddenSize, self.outputSize))
#Return the change in weights as one matrix
def computeGradients(self, X, y):
dJdW1, dJdW2 = self.costFunctionDerived(X, y)
return np.concatenate((dJdW1.ravel(), dJdW2.ravel()))
#Reset the weights matrices then add the current cost to the list of costs
def callbackf(self, params):
self.setParams(params) #Reset the weight matrices
self.J.append(self.costFunction(self.X, self.Y)) #Add the cost of the current weights to the cost array
#Resets the weight matrices then passes the current costs and change in weights
def costFunctionWrapper(self, params, X, y):
self.setParams(params)
return self.costFunction(X, y), self.computeGradients(X, y)
#The main train function that uses scipy's optimizing wrapper of the BFGS algorithm
def train(self, X, y):
#Create variables for local use
self.X = X
self.Y = y
self.J = [] #Create list to hold costs for graph
params0 = self.getParams() #Get the weights in one matrix for optimization
options = {'maxiter': 3500, 'disp' : False} #Set options for optimization - set disp to true to get more details when training
_res = optimize.minimize(self.costFunctionWrapper, params0, jac=True, method='BFGS', args=(X, y), options=options, callback=self.callbackf) #And optimize
self.setParams(_res.x) #Set the new weights from the outcome of the optimization
    #Separate training function for the interactive demo, which runs one optimizer iteration at a time
def train1(self, X, y):
self.X = X
self.Y = y
self.J = []
params0 = self.getParams()
options = {'maxiter': 1, 'disp' : False}
_res = optimize.minimize(self.costFunctionWrapper, params0, jac=True, method='BFGS', args=(X, y), options=options, callback=self.callbackf)
self.setParams(_res.x)
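#Optional sanity check (an illustrative addition, not used by the demos below):
#compare the analytic gradients from costFunctionDerived against central finite
#differences. The epsilon and the relative-error measure are arbitrary choices.
def gradientCheck(net, X, Y, epsilon=1e-4):
    params = net.getParams()
    numeric = np.zeros(params.shape)
    perturb = np.zeros(params.shape)
    for i in range(len(params)):
        perturb[i] = epsilon
        net.setParams(params + perturb)          #Cost slightly above the current weights
        costPlus = net.costFunction(X, Y)
        net.setParams(params - perturb)          #Cost slightly below the current weights
        costMinus = net.costFunction(X, Y)
        numeric[i] = float(np.sum(costPlus - costMinus)) / (2 * epsilon)
        perturb[i] = 0
    net.setParams(params)                        #Restore the original weights
    analytic = net.computeGradients(X, Y)
    return np.linalg.norm(analytic - numeric) / np.linalg.norm(analytic + numeric)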
#-----------------------------------------------Demos -----------------------------------------------#
#---Run the file for the main demo or import it to run the other demos or your own code on the net---#
#Main demo, includes example data - runs when file is ran alone
def demo():
net = NeuralNetwork()
costBefore = float(net.costFunction(x,y)[0])
net.train(x, y)
costAfter = float(net.costFunction(x,y)[0])
print("Cost Before: " + str(costBefore))
print("Cost After: " + str(costAfter))
print("Cost difference: " + str(costBefore - costAfter))
net.predict([6.7,5.2,2.5,1.6])
plt.plot(net.J)
plt.grid(1)
plt.xlabel('Iterations')
plt.ylabel('Cost')
plt.show()
#Gets the flower based on inputted values
def inp():
net = NeuralNetwork()
net.train(x,y)
sepalL = float(input("Sepal Length: "))
sepalW = float(input("Sepal Width : "))
PetalL = float(input("Petal Length: "))
PetalW = float(input("Petal Width : "))
inps = [sepalL,sepalW,PetalL,PetalW]
net.predict(inps)
#Shows interactive graph as net is being trained
def training(_iters):
js = np.array([])
net = NeuralNetwork()
plt.ion()
for iters in range(_iters):
net.train1(x, y)
js= np.append(js, net.J)
js.reshape(1, len(js))
plt.plot(js)
plt.grid(1)
plt.xlabel('Iterations')
plt.ylabel('Cost')
plt.show()
plt.pause(0.01)
while True: plt.pause(0.01)
if __name__ == "__main__":
demo()
|
import ddt
import httpretty
import mock
import requests
from requests import ConnectionError, Timeout
from ecommerce.core.url_utils import get_lms_url
from ecommerce.extensions.checkout.utils import get_provider_data
from ecommerce.tests.testcases import TestCase
@ddt.ddt
class UtilTests(TestCase):
@httpretty.activate
def test_get_provider_data(self):
"""
        Check that the correct provider data is returned for a successful request.
"""
httpretty.register_uri(
httpretty.GET, get_lms_url('api/credit/v1/providers/ASU'),
body='{"display_name": "Arizona State University"}',
content_type="application/json"
)
provider_data = get_provider_data('ASU')
self.assertDictEqual(provider_data, {"display_name": "Arizona State University"})
@httpretty.activate
def test_get_provider_data_unavailable_request(self):
"""
        Check that None is returned for a bad request.
"""
httpretty.register_uri(
httpretty.GET, get_lms_url('api/credit/v1/providers/ABC'),
status=400
)
provider_data = get_provider_data('ABC')
self.assertEqual(provider_data, None)
@ddt.data(ConnectionError, Timeout)
def test_exceptions(self, exception):
""" Verify the function returns None when a request exception is raised. """
with mock.patch.object(requests, 'get', mock.Mock(side_effect=exception)):
self.assertIsNone(get_provider_data('ABC'))
|
#
# Test cases for the methods in the parted.constraint module itself
#
# Copyright (C) 2009-2011 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Cantrell <[email protected]>
#
import _ped
import parted
import unittest
from tests.baseclass import RequiresDevice
# One class per method, multiple tests per class. For these simple methods,
# that seems like good organization. More complicated methods may require
# multiple classes and their own test suite.
class ConstraintNewTestCase(RequiresDevice):
def runTest(self):
align1 = parted.Alignment(offset=10, grainSize=5)
align2 = parted.Alignment(offset=10, grainSize=5)
geom1 = parted.Geometry(device=self.device, start=0, length=50)
geom2 = parted.Geometry(device=self.device, start=0, length=100)
# Check that not passing enough args to parted.Constraint.__init__
# is caught.
self.assertRaises(parted.ConstraintException, parted.Constraint)
self.assertRaises(parted.ConstraintException, parted.Constraint,
startAlign=align1, endAlign=align2)
# And then the correct ways of creating a _ped.Constraint.
c = parted.Constraint(minGeom=geom1, maxGeom=geom2)
self.assertIsInstance(c, parted.Constraint)
c = parted.Constraint(minGeom=geom1)
self.assertIsInstance(c, parted.Constraint)
c = parted.Constraint(maxGeom=geom2)
self.assertIsInstance(c, parted.Constraint)
c = parted.Constraint(exactGeom=geom1)
self.assertIsInstance(c, parted.Constraint)
c = parted.Constraint(device=self.device)
self.assertIsInstance(c, parted.Constraint)
c = parted.Constraint(startAlign=align1, endAlign=align2,
startRange=geom1, endRange=geom2,
minSize=10, maxSize=100)
self.assertIsInstance(c, parted.Constraint)
# Use a _ped.Constraint as the initializer
pc = _ped.Constraint(align1.getPedAlignment(),
align2.getPedAlignment(),
geom1.getPedGeometry(),
geom2.getPedGeometry(),
10, 100)
c = parted.Constraint(PedConstraint=pc)
self.assertIsInstance(c, parted.Constraint)
self.assertEqual(c.getPedConstraint(), pc)
class ConstraintGetSetTestCase(RequiresDevice):
def setUp(self):
RequiresDevice.setUp(self)
align1 = parted.Alignment(offset=10, grainSize=5)
align2 = parted.Alignment(offset=10, grainSize=5)
geom1 = parted.Geometry(device=self.device, start=0, length=50)
geom2 = parted.Geometry(device=self.device, start=25, length=50)
self.c = parted.Constraint(startAlign=align1, endAlign=align2,
startRange=geom1, endRange=geom2,
minSize=10, maxSize=100)
def runTest(self):
# Test that properties work
self.assertEqual(self.c.minSize, 10)
self.assertEqual(self.c.maxSize, 100)
self.assertIsInstance(self.c.startAlign, parted.Alignment)
self.assertIsInstance(self.c.endAlign, parted.Alignment)
self.assertIsInstance(self.c.startRange, parted.Geometry)
self.assertIsInstance(self.c.endRange, parted.Geometry)
# Test that setting directly and getting with getattr works.
self.c.minSize = 15
self.c.maxSize = 75
self.assertEqual(getattr(self.c, "minSize"), 15)
self.assertEqual(getattr(self.c, "maxSize"), 75)
self.assertIsInstance(getattr(self.c, "startAlign"), parted.Alignment)
self.assertIsInstance(getattr(self.c, "endAlign"), parted.Alignment)
self.assertIsInstance(getattr(self.c, "startRange"), parted.Geometry)
self.assertIsInstance(getattr(self.c, "endRange"), parted.Geometry)
# Test that setting with setattr and getting directly works.
setattr(self.c, "minSize", 10)
setattr(self.c, "maxSize", 90)
self.assertEqual(self.c.minSize, 10)
self.assertEqual(self.c.maxSize, 90)
# Test that values have the right type.
self.assertRaises(TypeError, setattr, self.c, "minSize", "string")
# Test that looking for invalid attributes fails properly.
self.assertRaises(AttributeError, getattr, self.c, "blah")
self.assertRaises(AttributeError, setattr, self.c, "startRange", 47)
self.assertRaises(AttributeError, setattr, self.c, "endRange", 47)
@unittest.skip("Unimplemented test case.")
class ConstraintIntersectTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class ConstraintSolveMaxTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class ConstraintSolveNearestTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class ConstraintIsSolutionTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class ConstraintGetPedConstraintTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class ConstraintStrTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
# And then a suite to hold all the test cases for this module.
def makeSuite():
suite = unittest.TestSuite()
suite.addTest(ConstraintNewTestCase())
suite.addTest(ConstraintGetSetTestCase())
suite.addTest(ConstraintIntersectTestCase())
suite.addTest(ConstraintSolveMaxTestCase())
suite.addTest(ConstraintSolveNearestTestCase())
suite.addTest(ConstraintIsSolutionTestCase())
suite.addTest(ConstraintGetPedConstraintTestCase())
suite.addTest(ConstraintStrTestCase())
return suite
s = makeSuite()
if __name__ == "__main__":
unittest.main(defaultTest='s', verbosity=2)
|
# -*- coding: utf-8 -*-
import webapp2
from webapp2_extras import auth, sessions, jinja2
from jinja2.runtime import TemplateNotFound
from google.appengine.api import users
import logging
class BaseRequestHandler(webapp2.RequestHandler):
    def controller(self, controllerClass):
        controller = controllerClass()
oauth_user_id = self.current_session_user_id()
if oauth_user_id is not None:
controller.oauth_user_id = oauth_user_id
google_user = users.get_current_user()
if google_user is not None:
controller.google_user = google_user
controller.username = self.request.cookies.get('username')
return controller
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
# Save all sessions.
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def jinja2(self):
"""Returns a Jinja2 renderer cached in the app registry"""
return jinja2.get_jinja2(app=self.app)
@webapp2.cached_property
def session(self):
"""Returns a session using the default cookie key"""
return self.session_store.get_session()
@webapp2.cached_property
def auth(self):
return auth.get_auth()
@webapp2.cached_property
def current_user(self):
"""Returns currently logged in user"""
user_id = self.current_session_user_id()
if user_id is not None:
return self.auth.store.user_model.get_by_id(user_id)
return None
def current_session_user_id(self):
session_dict = self.auth.get_session_data(pop=False)
if session_dict is not None:
return session_dict['user_id']
return None
@webapp2.cached_property
def logged_in(self):
"""Returns true if a user is currently logged in, false otherwise"""
return self.current_session_user_id() is not None or users.get_current_user() is not None
def render(self, template_name, template_vars={}):
# Preset values for the template
values = {
'url_for': self.uri_for,
'logged_in': self.logged_in,
'flashes': self.session.get_flashes()
}
# Add manually supplied template values
values.update(template_vars)
        # Render the template, or abort with a 404 if it does not exist
try:
self.response.write(self.jinja2.render_template(template_name, **values))
except TemplateNotFound:
self.abort(404)
def head(self, *args):
"""Head is used by Twitter. If not there the tweet button shows 0"""
pass
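# --- Usage sketch (illustrative): a concrete handler built on BaseRequestHandler.
# The template name is an assumption; render() merges url_for, logged_in and the
# session flashes into the supplied template values.
class ExampleIndexHandler(BaseRequestHandler):
    def get(self):
        self.render('index.html', {'title': 'Home'})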
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json, sys
from frappe import _
from frappe.utils import cint, flt, now, cstr, strip_html
from frappe.model import default_fields
from frappe.model.naming import set_new_name
class BaseDocument(object):
ignore_in_getter = ("doctype", "_meta", "meta", "_table_fields", "_valid_columns")
def __init__(self, d):
self.update(d)
self.dont_update_if_missing = []
@property
def meta(self):
if not hasattr(self, "_meta"):
self._meta = frappe.get_meta(self.doctype)
return self._meta
def update(self, d):
if "doctype" in d:
self.set("doctype", d.get("doctype"))
# first set default field values of base document
for key in default_fields:
if key in d:
self.set(key, d.get(key))
for key, value in d.iteritems():
self.set(key, value)
return self
def update_if_missing(self, d):
if isinstance(d, BaseDocument):
d = d.get_valid_dict()
if "doctype" in d:
self.set("doctype", d.get("doctype"))
for key, value in d.iteritems():
            # dont_update_if_missing is a list of fieldnames for which you don't want to set a default value
if (self.get(key) is None) and (value is not None) and (key not in self.dont_update_if_missing):
self.set(key, value)
def get_db_value(self, key):
return frappe.db.get_value(self.doctype, self.name, key)
def get(self, key=None, filters=None, limit=None, default=None):
if key:
if isinstance(key, dict):
return _filter(self.get_all_children(), key, limit=limit)
if filters:
if isinstance(filters, dict):
value = _filter(self.__dict__.get(key, []), filters, limit=limit)
else:
default = filters
filters = None
value = self.__dict__.get(key, default)
else:
value = self.__dict__.get(key, default)
if value is None and key not in self.ignore_in_getter \
and key in (d.fieldname for d in self.meta.get_table_fields()):
self.set(key, [])
value = self.__dict__.get(key)
return value
else:
return self.__dict__
def getone(self, key, filters=None):
return self.get(key, filters=filters, limit=1)[0]
def set(self, key, value):
if isinstance(value, list):
self.__dict__[key] = []
self.extend(key, value)
else:
self.__dict__[key] = value
def delete_key(self, key):
if key in self.__dict__:
del self.__dict__[key]
def append(self, key, value=None):
        if value is None:
            value = {}
if isinstance(value, (dict, BaseDocument)):
if not self.__dict__.get(key):
self.__dict__[key] = []
value = self._init_child(value, key)
self.__dict__[key].append(value)
return value
else:
raise ValueError
def extend(self, key, value):
if isinstance(value, list):
for v in value:
self.append(key, v)
else:
raise ValueError
def remove(self, doc):
self.get(doc.parentfield).remove(doc)
def _init_child(self, value, key):
if not self.doctype:
return value
if not isinstance(value, BaseDocument):
if "doctype" not in value:
value["doctype"] = self.get_table_field_doctype(key)
if not value["doctype"]:
raise AttributeError, key
value = BaseDocument(value)
value.init_valid_columns()
value.parent = self.name
value.parenttype = self.doctype
value.parentfield = key
if not getattr(value, "idx", None):
value.idx = len(self.get(key) or []) + 1
if not getattr(value, "name", None):
value.__dict__['__islocal'] = 1
return value
def get_valid_dict(self):
d = {}
for fieldname in self.meta.get_valid_columns():
d[fieldname] = self.get(fieldname)
return d
def init_valid_columns(self):
for key in default_fields:
if key not in self.__dict__:
self.__dict__[key] = None
if self.doctype in ("DocField", "DocPerm") and self.parent in ("DocType", "DocField", "DocPerm"):
from frappe.model.meta import get_table_columns
valid = get_table_columns(self.doctype)
else:
valid = self.meta.get_valid_columns()
for key in valid:
if key not in self.__dict__:
self.__dict__[key] = None
def is_new(self):
return self.get("__islocal")
def as_dict(self, no_nulls=False):
doc = self.get_valid_dict()
doc["doctype"] = self.doctype
for df in self.meta.get_table_fields():
children = self.get(df.fieldname) or []
doc[df.fieldname] = [d.as_dict(no_nulls=no_nulls) for d in children]
if no_nulls:
for k in doc.keys():
if doc[k] is None:
del doc[k]
if self.get("_user_tags"):
doc["_user_tags"] = self.get("_user_tags")
if self.get("__islocal"):
doc["__islocal"] = 1
elif self.get("__onload"):
doc["__onload"] = self.get("__onload")
return doc
def as_json(self):
return json.dumps(self.as_dict(), indent=1, sort_keys=True)
def get_table_field_doctype(self, fieldname):
return self.meta.get_field(fieldname).options
def get_parentfield_of_doctype(self, doctype):
fieldname = [df.fieldname for df in self.meta.get_table_fields() if df.options==doctype]
return fieldname[0] if fieldname else None
def db_insert(self):
set_new_name(self)
d = self.get_valid_dict()
columns = d.keys()
try:
frappe.db.sql("""insert into `tab{doctype}`
({columns}) values ({values})""".format(
doctype = self.doctype,
columns = ", ".join(["`"+c+"`" for c in columns]),
values = ", ".join(["%s"] * len(columns))
), d.values())
except Exception, e:
if e.args[0]==1062:
type, value, traceback = sys.exc_info()
frappe.msgprint(_("Duplicate name {0} {1}").format(self.doctype, self.name))
raise frappe.NameError, (self.doctype, self.name, e), traceback
else:
raise
self.set("__islocal", False)
def db_update(self):
if self.get("__islocal") or not self.name:
self.db_insert()
return
d = self.get_valid_dict()
columns = d.keys()
frappe.db.sql("""update `tab{doctype}`
set {values} where name=%s""".format(
doctype = self.doctype,
values = ", ".join(["`"+c+"`=%s" for c in columns])
), d.values() + [d.get("name")])
def db_set(self, fieldname, value):
self.set(fieldname, value)
self.set("modified", now())
self.set("modified_by", frappe.session.user)
frappe.db.set_value(self.doctype, self.name, fieldname, value, self.modified, self.modified_by)
def _fix_numeric_types(self):
for df in self.meta.get("fields"):
if df.fieldtype == "Check":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif self.get(df.fieldname) is not None:
if df.fieldtype == "Int":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif df.fieldtype in ("Float", "Currency", "Percent"):
self.set(df.fieldname, flt(self.get(df.fieldname)))
if self.docstatus is not None:
self.docstatus = cint(self.docstatus)
def _get_missing_mandatory_fields(self):
"""Get mandatory fields that do not have any values"""
def get_msg(df):
if df.fieldtype == "Table":
return "{}: {}: {}".format(_("Error"), _("Data missing in table"), _(df.label))
elif self.parentfield:
return "{}: {} #{}: {}: {}".format(_("Error"), _("Row"), self.idx,
_("Value missing for"), _(df.label))
else:
return "{}: {}: {}".format(_("Error"), _("Value missing for"), _(df.label))
missing = []
for df in self.meta.get("fields", {"reqd": 1}):
if self.get(df.fieldname) in (None, []) or not strip_html(cstr(self.get(df.fieldname))).strip():
missing.append((df.fieldname, get_msg(df)))
return missing
def get_invalid_links(self, is_submittable=False):
def get_msg(df, docname):
if self.parentfield:
return "{} #{}: {}: {}".format(_("Row"), self.idx, _(df.label), docname)
else:
return "{}: {}".format(_(df.label), docname)
invalid_links = []
cancelled_links = []
for df in self.meta.get_link_fields() + self.meta.get("fields",
{"fieldtype":"Dynamic Link"}):
docname = self.get(df.fieldname)
if docname:
if df.fieldtype=="Link":
doctype = df.options
if not doctype:
frappe.throw(_("Options not set for link field {0}").format(df.fieldname))
else:
doctype = self.get(df.options)
if not doctype:
frappe.throw(_("{0} must be set first").format(self.meta.get_label(df.options)))
# MySQL is case insensitive. Preserve case of the original docname in the Link Field.
value = frappe.db.get_value(doctype, docname)
setattr(self, df.fieldname, value)
if not value:
invalid_links.append((df.fieldname, docname, get_msg(df, docname)))
elif (df.fieldname != "amended_from"
and (is_submittable or self.meta.is_submittable) and frappe.get_meta(doctype).is_submittable
and cint(frappe.db.get_value(doctype, docname, "docstatus"))==2):
cancelled_links.append((df.fieldname, docname, get_msg(df, docname)))
return invalid_links, cancelled_links
def _validate_selects(self):
if frappe.flags.in_import:
return
for df in self.meta.get_select_fields():
if df.fieldname=="naming_series" or not (self.get(df.fieldname) and df.options):
continue
options = (df.options or "").split("\n")
# if only empty options
if not filter(None, options):
continue
# strip and set
self.set(df.fieldname, cstr(self.get(df.fieldname)).strip())
value = self.get(df.fieldname)
if value not in options and not (frappe.flags.in_test and value.startswith("_T-")):
# show an elaborate message
prefix = _("Row #{0}:").format(self.idx) if self.get("parentfield") else ""
label = _(self.meta.get_label(df.fieldname))
comma_options = '", "'.join(_(each) for each in options)
frappe.throw(_('{0} {1} cannot be "{2}". It should be one of "{3}"').format(prefix, label,
value, comma_options))
def _validate_constants(self):
if frappe.flags.in_import:
return
constants = [d.fieldname for d in self.meta.get("fields", {"set_only_once": 1})]
if constants:
values = frappe.db.get_value(self.doctype, self.name, constants, as_dict=True)
for fieldname in constants:
if self.get(fieldname) != values.get(fieldname):
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(fieldname)),
frappe.CannotChangeConstantError)
def _validate_update_after_submit(self):
current = frappe.db.get_value(self.doctype, self.name, "*", as_dict=True)
for key, value in current.iteritems():
df = self.meta.get_field(key)
if df and not df.allow_on_submit and (self.get(key) or value) and self.get(key) != value:
frappe.throw(_("Not allowed to change {0} after submission").format(df.label),
frappe.UpdateAfterSubmitError)
def get_formatted(self, fieldname, doc=None, currency=None):
from frappe.utils.formatters import format_value
return format_value(self.get(fieldname), self.meta.get_field(fieldname),
doc=doc or self, currency=currency)
def _filter(data, filters, limit=None):
"""pass filters as:
{"key": "val", "key": ["!=", "val"],
"key": ["in", "val"], "key": ["not in", "val"], "key": "^val",
"key" : True (exists), "key": False (does not exist) }"""
out = []
for d in data:
add = True
for f in filters:
fval = filters[f]
if fval is True:
fval = ("not None", fval)
elif fval is False:
fval = ("None", fval)
elif not isinstance(fval, (tuple, list)):
if isinstance(fval, basestring) and fval.startswith("^"):
fval = ("^", fval[1:])
else:
fval = ("=", fval)
if not frappe.compare(getattr(d, f, None), fval[0], fval[1]):
add = False
break
if add:
out.append(d)
if limit and (len(out)-1)==limit:
break
return out
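# --- Illustration (hedged sketch): how the filter syntax accepted by _filter()
# and BaseDocument.get() reads in practice. The child rows and fieldnames below
# are hypothetical; the comparisons themselves are delegated to frappe.compare.
def _filter_examples(children):
    exact = _filter(children, {"qty": 0})                # rows whose qty equals 0
    prefixed = _filter(children, {"item_code": "^RM-"})  # "^" means "starts with"
    present = _filter(children, {"batch_no": True})      # True means the field has a value
    absent = _filter(children, {"batch_no": False})      # False means the field is empty
    return exact, prefixed, present, absent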
|
from fabric.api import task, run, execute
BREW = '/usr/local/bin/brew '
BREW_CONFIG_DIR = 'brew/'
NPM = '/usr/local/bin/npm '
NPM_CONFIG_DIR = 'npm/'
@task
def install_homebrew():
run('ruby -e "$(curl -fsSL https://raw.github.com/Homebrew/homebrew/go/install)"')
run(BREW + ' install phinze/cask/brew-cask')
@task
def update_brew():
run(BREW + 'update')
run(BREW + 'upgrade')
@task
def install_from_brew():
execute(tab_brews)
execute(install_brew_apps)
@task
def tab_brews():
with open(BREW_CONFIG_DIR + 'taps.txt', 'r') as f:
taps = f.read().splitlines()
for tap in taps:
run('%s tap %s ' % (BREW, tap))
@task
def install_brew_apps():
with open(BREW_CONFIG_DIR + 'tools.txt', 'r') as f:
tools = f.read().splitlines()
for tool in tools:
run('%s install %s ' % (BREW, tool))
@task
def npm_install_global():
with open(NPM_CONFIG_DIR + 'global-tools.txt', 'r') as f:
tools = f.read().splitlines()
for tool in tools:
run('%s install -g --upgrade %s' % (NPM, tool))
def bootstrap():
execute(install_homebrew)
execute(update_brew)
execute(install_brew_apps)
execute(npm_install_global)
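# --- Notes (assumptions, for illustration): the tasks above expect plain
# newline-separated config files - brew/taps.txt, brew/tools.txt and
# npm/global-tools.txt - each listing one tap or package per line. With
# Fabric's CLI the tasks would be invoked along the lines of:
#
#   fab -H yourhost install_homebrew update_brew install_from_brew npm_install_global
#
# bootstrap() is not decorated with @task, so it chains the same steps
# programmatically rather than from the command line.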
|
"""
Use dawg to speed up fuzzy search
"""
import itertools
import dawg
from Levenshtein import distance
from polyleven import levenshtein as lvdistance
def fast_fuzzysearch(words, ed):
if ed == 1:
return Fast1FuzzySearch(words)
elif ed == 2:
return Fast2FuzzySearch(words)
else:
raise ValueError("Currently only supports edit distance up to 2")
class Fast2FuzzySearch(object):
"""
    Fuzzy string matching for arbitrary edit distance; small distances (ed <= 4)
    are where this index is useful, beyond that a direct distance scan is faster.
    This class handles ed <= 2.
"""
_ed = 2
def __init__(self, words):
self.ffs = {
1: Fast1FuzzySearch(words)
}
modified_words = list(zip(*[
# wL wR rmL rmF
(w[:len(w) // 2], w[len(w) // 2:], w[:-1], w[1:])
for w in words
]))
self.ffs[2] = [Fast1FuzzySearch(ws) for ws in modified_words]
def query(self, w, ed=2):
assert ed <= self._ed
w = str(w)
n = len(w)
res_iter_list = []
if ed <= 1:
return self.ffs[1].query(w, ed)
# 0 error on prefix, 2 no suffix
res_iter_list.append(self.ffs[1].words_with_prefix(w[:n // 2]))
# 1 error on left
res_iter_list.extend(
self.ffs[1].words_with_prefix(tw)
for tw in self.ffs[2][0].query(w[:n // 2])
)
# 1 error on right
res_iter_list.extend(
self.ffs[1].words_with_suffix(tw)
for tw in self.ffs[2][1].query(w[n // 2:])
)
# first character deleted or replaced
res_iter_list.extend(
self.ffs[1].words_with_prefix(tw)
for tw in self.ffs[2][2].query(w[1:])
)
# Last character deleted or replaced
res_iter_list.extend(
self.ffs[1].words_with_suffix(tw)
for tw in self.ffs[2][2].query(w[:-1])
)
# 2 error on prefix, 0 on suffix
res_iter_list.append(self.ffs[1].words_with_suffix(w[n // 2:]))
all_options = set(w for iter_ in res_iter_list for w in iter_)
# print "Len options to explore: {}".format(len(all_options))
return [
_w
for _w in all_options
if abs(len(_w) - len(w)) <= self._ed and
lvdistance(_w, w, self._ed) <= self._ed
]
class Fast1FuzzySearch(object):
"""This is an implementation of fuzzy string matching using dawgs.
Good for only edit distance 1. Idea is for the query take word
and look at words with similar prifix, or the ones with simlar
suffix. We are looking for words at distance 1, so, the edit must
be either on the first half of the word, or on the last half, and
we can safely check that using prefix, and suffix match.
"""
_ed = 1
def __init__(self, words):
# good for 1 edit distance
self._L, self._R = self._process_list(list(set(words)))
def _process_list(self, words):
rev_words = [w[::-1] for w in words]
norm_dawg = dawg.CompletionDAWG(words)
rev_dawg = dawg.CompletionDAWG(rev_words)
return norm_dawg, rev_dawg
def words_with_prefix(self, prefix):
return self._L.iterkeys(str(prefix))
def words_with_suffix(self, suffix):
return (w[::-1] for w in self._R.iterkeys(str(suffix[::-1])))
def query(self, w, ed=1): # Can only handle ed=1
"""
Finds the fuzzy matches (within edit distance 1) of w from words
"""
assert ed <= self._ed
if ed == 0:
return [w] if w in self._L else ['']
w = str(w)
n = len(w)
prefix, suffix = w[:n // 2], w[n // 2:][::-1]
options_w_prefix = self._L.keys(prefix)
options_w_suffix = [x[::-1] for x in self._R.iterkeys(suffix)]
return [
_w
for _w in set(itertools.chain(options_w_prefix, options_w_suffix))
if abs(len(_w) - len(w)) <= 1 and lvdistance(str(_w), str(w), 1) <= 1
]
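# --- Minimal smoke test (a hedged stand-in): the __main__ block below calls
# test_FastFuzzySearch(), but the original file never defines it. This version
# simply exercises both index classes on a tiny, made-up word list.
def test_FastFuzzySearch():
    words = ['password', 'passport', 'letmein', 'dragon', 'monkey']
    ffs1 = fast_fuzzysearch(words, ed=1)
    assert 'password' in ffs1.query('passwrd', ed=1)   # one deletion away
    ffs2 = fast_fuzzysearch(words, ed=2)
    assert 'passport' in ffs2.query('passprt', ed=2)   # within distance 2
    print("fast_fuzzysearch smoke test passed")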
if __name__ == "__main__":
test_FastFuzzySearch()
|
import logging
import os
import subprocess
import sys
import threading
import types
from datetime import datetime
import pytest
from porcupine import _logs
# should be possible to start many porcupines at almost exactly the same time
def test_race_conditions():
timed_out = [False] * 10
def thread_target(index):
try:
subprocess.run([sys.executable, '-m', 'porcupine'], timeout=2, stdout=subprocess.DEVNULL)
except subprocess.TimeoutExpired:
timed_out[index] = True
threads = [
threading.Thread(name=f'race-thread-{i}', target=thread_target, args=[i])
for i in range(len(timed_out))
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert timed_out == [True] * len(timed_out)
def test_remove_old_logs(monkeypatch, caplog):
long_time_ago = datetime(year=1987, month=6, day=5, hour=4, minute=3, second=2)
with monkeypatch.context() as monkey:
monkey.setattr(_logs, 'datetime', types.SimpleNamespace(now=(lambda: long_time_ago)))
_logs._open_log_file().close()
_logs._open_log_file().close()
_logs._open_log_file().close()
_logs._open_log_file().close()
caplog.set_level(logging.INFO)
_logs._remove_old_logs()
text = caplog.text
assert f'logs{os.sep}1987-06-05T04-03-02.txt is more than 7 days old, removing' in text
assert f'logs{os.sep}1987-06-05T04-03-02_1.txt is more than 7 days old, removing' in text
assert f'logs{os.sep}1987-06-05T04-03-02_2.txt is more than 7 days old, removing' in text
assert f'logs{os.sep}1987-06-05T04-03-02_3.txt is more than 7 days old, removing' in text
def test_log_path_printed(mocker):
mocker.patch('porcupine._logs.print')
_logs.print.side_effect = ZeroDivisionError # to make it stop when it prints
with pytest.raises(ZeroDivisionError):
_logs.setup(None)
_logs.print.assert_called_once()
printed = _logs.print.call_args[0][0]
assert printed.startswith('log file: ')
assert os.path.isfile(printed[len('log file: '):])
|
"""
Matrix functions that use Pade approximation with inverse scaling and squaring.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.linalg._matfuncs_sqrtm import SqrtmError, _sqrtm_triu
from scipy.linalg.decomp_schur import schur, rsf2csf
from scipy.linalg.matfuncs import funm
from scipy.linalg import svdvals, solve_triangular
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import onenormest
import scipy.special
class LogmRankWarning(UserWarning):
pass
class LogmExactlySingularWarning(LogmRankWarning):
pass
class LogmNearlySingularWarning(LogmRankWarning):
pass
class LogmError(np.linalg.LinAlgError):
pass
class FractionalMatrixPowerError(np.linalg.LinAlgError):
pass
#TODO renovate or move this class when scipy operators are more mature
class _MatrixM1PowerOperator(LinearOperator):
"""
A representation of the linear operator (A - I)^p.
"""
def __init__(self, A, p):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0 or p != int(p):
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self.ndim = A.ndim
self.shape = A.shape
def matvec(self, x):
for i in range(self._p):
x = self._A.dot(x) - x
return x
def rmatvec(self, x):
for i in range(self._p):
x = x.dot(self._A) - x
return x
def matmat(self, X):
for i in range(self._p):
X = self._A.dot(X) - X
return X
@property
def T(self):
return _MatrixM1PowerOperator(self._A.T, self._p)
#TODO renovate or move this function when scipy operators are more mature
def _onenormest_m1_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False):
"""
Efficiently estimate the 1-norm of (A - I)^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return onenormest(_MatrixM1PowerOperator(A, p),
t=t, itmax=itmax, compute_v=compute_v, compute_w=compute_w)
def _unwindk(z):
"""
Compute the scalar unwinding number.
    Uses Eq. (5.3) in [1]_, and should be equal to (z - log(exp(z))) / (2 pi i).
Note that this definition differs in sign from the original definition
in equations (5, 6) in [2]_. The sign convention is justified in [3]_.
Parameters
----------
z : complex
A complex number.
Returns
-------
unwinding_number : integer
The scalar unwinding number of z.
References
----------
.. [1] Nicholas J. Higham and Lijing lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
.. [2] Robert M. Corless and David J. Jeffrey,
"The unwinding number." Newsletter ACM SIGSAM Bulletin
Volume 30, Issue 2, June 1996, Pages 28-35.
.. [3] Russell Bradford and Robert M. Corless and James H. Davenport and
David J. Jeffrey and Stephen M. Watt,
"Reasoning about the elementary functions of complex analysis"
Annals of Mathematics and Artificial Intelligence,
36: 303-318, 2002.
"""
return int(np.ceil((z.imag - np.pi) / (2*np.pi)))
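# --- Numerical illustration (not used by the algorithms below): the unwinding
# number is 0 while Im(z) lies in (-pi, pi] and steps by +/-1 outside that strip.
def _unwindk_examples():
    assert _unwindk(0.5 + 1.0j) == 0    # inside the principal strip
    assert _unwindk(0.0 + 4.0j) == 1    # just above Im(z) = pi
    assert _unwindk(0.0 - 4.0j) == -1   # just below Im(z) = -pi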
def _briggs_helper_function(a, k):
"""
Computes r = a^(1 / (2^k)) - 1.
This is algorithm (2) of [1]_.
The purpose is to avoid a danger of subtractive cancellation.
For more computational efficiency it should probably be cythonized.
Parameters
----------
a : complex
A complex number preferably belonging to the closed negative real axis.
k : integer
A nonnegative integer.
Returns
-------
r : complex
The value r = a^(1 / (2^k)) - 1 computed with less cancellation.
Notes
-----
The algorithm as written in the publication does not handle k=0 or k=1
correctly, so these are special-cased in this implementation.
    This function is intended to not allow `a` to belong to the closed
    negative real axis, but this constraint is relaxed here.
References
----------
.. [1] Awad H. Al-Mohy (2012)
"A more accurate Briggs method for the logarithm",
Numerical Algorithms, 59 : 393--402.
"""
if k < 0 or int(k) != k:
raise ValueError('expected a nonnegative integer k')
if k == 0:
return a - 1
elif k == 1:
return np.sqrt(a) - 1
else:
k_hat = k
if np.angle(a) >= np.pi / 2:
a = np.sqrt(a)
k_hat = k - 1
z0 = a - 1
a = np.sqrt(a)
r = 1 + a
for j in range(1, k_hat):
a = np.sqrt(a)
r = r * (1 + a)
r = z0 / r
return r
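# --- Numerical illustration (hedged, not part of the algorithm): away from the
# closed negative real axis the helper agrees with the naive a**(1/2**k) - 1; it
# just evaluates the telescoping product to avoid subtractive cancellation.
def _briggs_examples():
    a = 1e-8 + 1j
    for k in (0, 1, 3, 6):
        r = _briggs_helper_function(a, k)
        naive = a ** (1.0 / 2 ** k) - 1
        assert abs(r - naive) < 1e-10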
def _fractional_power_superdiag_entry(l1, l2, t12, p):
"""
Compute a superdiagonal entry of a fractional matrix power.
This is Eq. (5.6) in [1]_.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
p : float
A fractional power.
Returns
-------
f12 : complex
A superdiagonal entry of the fractional matrix power.
Notes
-----
Some amount of care has been taken to return a real number
if all of the inputs are real.
References
----------
.. [1] Nicholas J. Higham and Lijing lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
if l1 == l2:
f12 = t12 * p * l1**(p-1)
elif abs(l1) < abs(l2) / 2 or abs(l2) < abs(l1) / 2:
f12 = t12 * ((l2**p) - (l1**p)) / (l2 - l1)
else:
# This is Eq. (5.5) in [1].
z = (l2 - l1) / (l2 + l1)
log_l1 = np.log(l1)
log_l2 = np.log(l2)
arctanh_z = np.arctanh(z)
tmp_a = t12 * np.exp((p/2)*(log_l2 + log_l1))
tmp_u = _unwindk(log_l2 - log_l1)
if tmp_u:
tmp_b = p * (arctanh_z + np.pi * 1j * tmp_u)
else:
tmp_b = p * arctanh_z
tmp_c = 2 * np.sinh(tmp_b) / (l2 - l1)
f12 = tmp_a * tmp_c
return f12
def _logm_superdiag_entry(l1, l2, t12):
"""
Compute a superdiagonal entry of a matrix logarithm.
This is Eq. (11.28) in [1]_.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
Returns
-------
f12 : complex
A superdiagonal entry of the matrix logarithm.
Notes
-----
Some amount of care has been taken to return a real number
if all of the inputs are real.
References
----------
.. [1] Nicholas J. Higham (2008)
"Functions of Matrices: Theory and Computation"
ISBN 978-0-898716-46-7
"""
if l1 == l2:
f12 = t12 / l1
elif abs(l1) < abs(l2) / 2 or abs(l2) < abs(l1) / 2:
f12 = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1)
else:
z = (l2 - l1) / (l2 + l1)
ua = _unwindk(np.log(l2) - np.log(l1))
ub = _unwindk(np.log(1+z) - np.log(1-z))
u = ua + ub
if u:
f12 = t12 * (2*np.arctanh(z) + 2*np.pi*1j*(ua + ub)) / (l2 - l1)
else:
f12 = t12 * 2 * np.arctanh(z) / (l2 - l1)
return f12
def _inverse_squaring_helper(T0, theta):
"""
A helper function for inverse scaling and squaring for Pade approximation.
Parameters
----------
T0 : (N, N) array_like upper triangular
Matrix involved in inverse scaling and squaring.
theta : indexable
The values theta[1] .. theta[7] must be available.
They represent bounds related to Pade approximation, and they depend
on the matrix function which is being computed.
For example, different values of theta are required for
matrix logarithm than for fractional matrix power.
Returns
-------
R : (N, N) array_like upper triangular
Composition of zero or more matrix square roots of T0, minus I.
s : non-negative integer
Number of square roots taken.
m : positive integer
The degree of the Pade approximation.
Notes
-----
This subroutine appears as a chunk of lines within
a couple of published algorithms; for example it appears
as lines 4--35 in algorithm (3.1) of [1]_, and
as lines 3--34 in algorithm (4.1) of [2]_.
The instances of 'goto line 38' in algorithm (3.1) of [1]_
    probably mean 'goto line 36' and have been interpreted accordingly.
References
----------
.. [1] Nicholas J. Higham and Lijing Lin (2013)
"An Improved Schur-Pade Algorithm for Fractional Powers
of a Matrix and their Frechet Derivatives."
.. [2] Awad H. Al-Mohy and Nicholas J. Higham (2012)
"Improved Inverse Scaling and Squaring Algorithms
for the Matrix Logarithm."
SIAM Journal on Scientific Computing, 34 (4). C152-C169.
ISSN 1095-7197
"""
if len(T0.shape) != 2 or T0.shape[0] != T0.shape[1]:
raise ValueError('expected an upper triangular square matrix')
n, n = T0.shape
T = T0
# Find s0, the smallest s such that the spectral radius
# of a certain diagonal matrix is at most theta[7].
# Note that because theta[7] < 1,
# this search will not terminate if any diagonal entry of T is zero.
s0 = 0
tmp_diag = np.diag(T)
if np.count_nonzero(tmp_diag) != n:
raise Exception('internal inconsistency')
while np.max(np.absolute(tmp_diag - 1)) > theta[7]:
tmp_diag = np.sqrt(tmp_diag)
s0 += 1
# Take matrix square roots of T.
for i in range(s0):
T = _sqrtm_triu(T)
# Flow control in this section is a little odd.
# This is because I am translating algorithm descriptions
# which have GOTOs in the publication.
s = s0
k = 0
d2 = _onenormest_m1_power(T, 2) ** (1/2)
d3 = _onenormest_m1_power(T, 3) ** (1/3)
a2 = max(d2, d3)
m = None
for i in (1, 2):
if a2 <= theta[i]:
m = i
break
while m is None:
if s > s0:
d3 = _onenormest_m1_power(T, 3) ** (1/3)
d4 = _onenormest_m1_power(T, 4) ** (1/4)
a3 = max(d3, d4)
if a3 <= theta[7]:
j1 = min(i for i in (3, 4, 5, 6, 7) if a3 <= theta[i])
if j1 <= 6:
m = j1
break
elif a3 / 2 <= theta[5] and k < 2:
k += 1
T = _sqrtm_triu(T)
s += 1
continue
d5 = _onenormest_m1_power(T, 5) ** (1/5)
a4 = max(d4, d5)
eta = min(a3, a4)
for i in (6, 7):
if eta <= theta[i]:
m = i
break
if m is not None:
break
T = _sqrtm_triu(T)
s += 1
# The subtraction of the identity is redundant here,
# because the diagonal will be replaced for improved numerical accuracy,
# but this formulation should help clarify the meaning of R.
R = T - np.identity(n)
# Replace the diagonal and first superdiagonal of T0^(1/(2^s)) - I
# using formulas that have less subtractive cancellation.
# Skip this step if the principal branch
# does not exist at T0; this happens when a diagonal entry of T0
# is negative with imaginary part 0.
has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
if has_principal_branch:
for j in range(n):
a = T0[j, j]
r = _briggs_helper_function(a, s)
R[j, j] = r
p = np.exp2(-s)
for j in range(n-1):
l1 = T0[j, j]
l2 = T0[j+1, j+1]
t12 = T0[j, j+1]
f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
R[j, j+1] = f12
# Return the T-I matrix, the number of square roots, and the Pade degree.
if not np.array_equal(R, np.triu(R)):
raise Exception('internal inconsistency')
return R, s, m
def _fractional_power_pade_constant(i, t):
# A helper function for matrix fractional power.
if i < 1:
raise ValueError('expected a positive integer i')
if not (-1 < t < 1):
raise ValueError('expected -1 < t < 1')
if i == 1:
return -t
elif i % 2 == 0:
j = i // 2
return (-j + t) / (2 * (2*j - 1))
elif i % 2 == 1:
j = (i - 1) // 2
return (-j - t) / (2 * (2*j + 1))
else:
raise Exception('internal error')
def _fractional_power_pade(R, t, m):
"""
Evaluate the Pade approximation of a fractional matrix power.
Evaluate the degree-m Pade approximation of R
to the fractional matrix power t using the continued fraction
in bottom-up fashion using algorithm (4.1) in [1]_.
Parameters
----------
R : (N, N) array_like
Upper triangular matrix whose fractional power to evaluate.
t : float
Fractional power between -1 and 1 exclusive.
m : positive integer
Degree of Pade approximation.
Returns
-------
U : (N, N) array_like
The degree-m Pade approximation of R to the fractional power t.
This matrix will be upper triangular.
References
----------
.. [1] Nicholas J. Higham and Lijing lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
if m < 1 or int(m) != m:
raise ValueError('expected a positive integer m')
if not (-1 < t < 1):
raise ValueError('expected -1 < t < 1')
R = np.asarray(R)
if len(R.shape) != 2 or R.shape[0] != R.shape[1]:
raise ValueError('expected an upper triangular square matrix')
n, n = R.shape
ident = np.identity(n)
Y = R * _fractional_power_pade_constant(2*m, t)
for j in range(2*m - 1, 0, -1):
rhs = R * _fractional_power_pade_constant(j, t)
Y = solve_triangular(ident + Y, rhs)
U = ident + Y
if not np.array_equal(U, np.triu(U)):
raise Exception('internal inconsistency')
return U
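# --- Scalar sanity check (illustrative): because _remainder_matrix_power_triu
# calls this routine with -R, _fractional_power_pade(X, t, m) approximates
# (I - X)**t; for a 1x1 matrix that is easy to verify directly.
def _fractional_power_pade_example():
    r, t = 0.1, 0.5
    U = _fractional_power_pade(np.array([[r]]), t, 7)
    assert abs(U[0, 0] - (1 - r) ** t) < 1e-12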
def _remainder_matrix_power_triu(T, t):
"""
Compute a fractional power of an upper triangular matrix.
The fractional power is restricted to fractions -1 < t < 1.
This uses algorithm (3.1) of [1]_.
The Pade approximation itself uses algorithm (4.1) of [2]_.
Parameters
----------
T : (N, N) array_like
Upper triangular matrix whose fractional power to evaluate.
t : float
Fractional power between -1 and 1 exclusive.
Returns
-------
X : (N, N) array_like
The fractional power of the matrix.
References
----------
.. [1] Nicholas J. Higham and Lijing Lin (2013)
"An Improved Schur-Pade Algorithm for Fractional Powers
of a Matrix and their Frechet Derivatives."
.. [2] Nicholas J. Higham and Lijing lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
m_to_theta = {
1: 1.51e-5,
2: 2.24e-3,
3: 1.88e-2,
4: 6.04e-2,
5: 1.24e-1,
6: 2.00e-1,
7: 2.79e-1,
}
n, n = T.shape
T0 = T
T0_diag = np.diag(T0)
if np.array_equal(T0, np.diag(T0_diag)):
U = np.diag(T0_diag ** t)
else:
R, s, m = _inverse_squaring_helper(T0, m_to_theta)
# Evaluate the Pade approximation.
# Note that this function expects the negative of the matrix
# returned by the inverse squaring helper.
U = _fractional_power_pade(-R, t, m)
# Undo the inverse scaling and squaring.
# Be less clever about this
# if the principal branch does not exist at T0;
# this happens when a diagonal entry of T0
# is negative with imaginary part 0.
eivals = np.diag(T0)
has_principal_branch = all(x.real > 0 or x.imag != 0 for x in eivals)
for i in range(s, -1, -1):
if i < s:
U = U.dot(U)
            # Recompute the diagonal and the first superdiagonal of U after
            # each squaring; skip this when the principal branch does not
            # exist at T0 (see the comment above the loop).
            if has_principal_branch:
p = t * np.exp2(-i)
U[np.diag_indices(n)] = T0_diag ** p
for j in range(n-1):
l1 = T0[j, j]
l2 = T0[j+1, j+1]
t12 = T0[j, j+1]
f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
U[j, j+1] = f12
if not np.array_equal(U, np.triu(U)):
raise Exception('internal inconsistency')
return U
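# A hedged, self-contained check (illustration only, not used elsewhere in the
# module): for t = 0.5 the returned matrix is a square root of T, so U.dot(U)
# should be close to T for a well-conditioned upper triangular input.
# The function name below is hypothetical.
def _demo_remainder_matrix_power_triu(t=0.5):
    T = np.array([[4.0, 1.0, 0.5],
                  [0.0, 9.0, 2.0],
                  [0.0, 0.0, 16.0]])
    U = _remainder_matrix_power_triu(T, t)
    return np.allclose(U.dot(U), T)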
def _remainder_matrix_power(A, t):
"""
Compute the fractional power of a matrix, for fractions -1 < t < 1.
This uses algorithm (3.1) of [1]_.
The Pade approximation itself uses algorithm (4.1) of [2]_.
Parameters
----------
A : (N, N) array_like
Matrix whose fractional power to evaluate.
t : float
Fractional power between -1 and 1 exclusive.
Returns
-------
X : (N, N) array_like
The fractional power of the matrix.
References
----------
.. [1] Nicholas J. Higham and Lijing Lin (2013)
"An Improved Schur-Pade Algorithm for Fractional Powers
of a Matrix and their Frechet Derivatives."
    .. [2] Nicholas J. Higham and Lijing Lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
# This code block is copied from numpy.matrix_power().
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('input must be a square array')
# Get the number of rows and columns.
n, n = A.shape
# Triangularize the matrix if necessary,
# attempting to preserve dtype if possible.
if np.array_equal(A, np.triu(A)):
Z = None
T = A
else:
if np.isrealobj(A):
T, Z = schur(A)
if not np.array_equal(T, np.triu(T)):
T, Z = rsf2csf(T, Z)
else:
T, Z = schur(A, output='complex')
    # Zeros on the diagonal of the triangular matrix are forbidden,
    # because inverse scaling and squaring cannot deal with them.
T_diag = np.diag(T)
if np.count_nonzero(T_diag) != n:
raise FractionalMatrixPowerError(
'cannot use inverse scaling and squaring to find '
'the fractional matrix power of a singular matrix')
# If the triangular matrix is real and has a negative
# entry on the diagonal, then force the matrix to be complex.
if np.isrealobj(T) and np.min(T_diag) < 0:
T = T.astype(complex)
# Get the fractional power of the triangular matrix,
# and de-triangularize it if necessary.
U = _remainder_matrix_power_triu(T, t)
if Z is not None:
ZH = np.conjugate(Z).T
return Z.dot(U).dot(ZH)
else:
return U
def _fractional_matrix_power(A, p):
"""
Compute the fractional power of a matrix.
See the fractional_matrix_power docstring in matfuncs.py for more info.
"""
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
if p == int(p):
return np.linalg.matrix_power(A, int(p))
# Compute singular values.
s = svdvals(A)
# Inverse scaling and squaring cannot deal with a singular matrix,
# because the process of repeatedly taking square roots
# would not converge to the identity matrix.
if s[-1]:
# Compute the condition number relative to matrix inversion,
# and use this to decide between floor(p) and ceil(p).
k2 = s[0] / s[-1]
p1 = p - np.floor(p)
p2 = p - np.ceil(p)
if p1 * k2 ** (1 - p1) <= -p2 * k2:
a = int(np.floor(p))
b = p1
else:
a = int(np.ceil(p))
b = p2
try:
R = _remainder_matrix_power(A, b)
Q = np.linalg.matrix_power(A, a)
return Q.dot(R)
except np.linalg.LinAlgError as e:
pass
# If p is negative then we are going to give up.
# If p is non-negative then we can fall back to generic funm.
if p < 0:
X = np.empty_like(A)
X.fill(np.nan)
return X
else:
p1 = p - np.floor(p)
a = int(np.floor(p))
b = p1
R, info = funm(A, lambda x: pow(x, b), disp=False)
Q = np.linalg.matrix_power(A, a)
return Q.dot(R)
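# A hedged usage sketch (illustration only): _fractional_matrix_power splits p
# into an integer part handled by np.linalg.matrix_power and a remainder in
# (-1, 1) handled by the Schur-Pade machinery above, so for p = 2.5 the result
# X is the principal power A**2.5 and X.dot(X) should match matrix_power(A, 5).
# The function name below is hypothetical.
def _demo_fractional_matrix_power():
    A = np.array([[2.0, 1.0],
                  [0.5, 3.0]])
    X = _fractional_matrix_power(A, 2.5)
    return np.allclose(X.dot(X), np.linalg.matrix_power(A, 5))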
def _logm_triu(T):
"""
Compute matrix logarithm of an upper triangular matrix.
The matrix logarithm is the inverse of
expm: expm(logm(`T`)) == `T`
Parameters
----------
T : (N, N) array_like
Upper triangular matrix whose logarithm to evaluate
Returns
-------
logm : (N, N) ndarray
Matrix logarithm of `T`
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
"Improved Inverse Scaling and Squaring Algorithms
for the Matrix Logarithm."
SIAM Journal on Scientific Computing, 34 (4). C152-C169.
ISSN 1095-7197
.. [2] Nicholas J. Higham (2008)
"Functions of Matrices: Theory and Computation"
ISBN 978-0-898716-46-7
    .. [3] Nicholas J. Higham and Lijing Lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
T = np.asarray(T)
if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
raise ValueError('expected an upper triangular square matrix')
n, n = T.shape
# Construct T0 with the appropriate type,
# depending on the dtype and the spectrum of T.
T_diag = np.diag(T)
keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
if keep_it_real:
T0 = T
else:
T0 = T.astype(complex)
# Define bounds given in Table (2.1).
theta = (None,
1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2,
1.28e-1, 2.06e-1, 2.88e-1, 3.67e-1,
4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1,
6.52e-1, 6.89e-1, 7.21e-1, 7.49e-1)
R, s, m = _inverse_squaring_helper(T0, theta)
# Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1).
# This requires the nodes and weights
# corresponding to degree-m Gauss-Legendre quadrature.
# These quadrature arrays need to be transformed from the [-1, 1] interval
# to the [0, 1] interval.
nodes, weights = scipy.special.p_roots(m)
nodes = nodes.real
if nodes.shape != (m,) or weights.shape != (m,):
raise Exception('internal error')
nodes = 0.5 + 0.5 * nodes
weights = 0.5 * weights
ident = np.identity(n)
U = np.zeros_like(R)
for alpha, beta in zip(weights, nodes):
U += solve_triangular(ident + beta*R, alpha*R)
U *= np.exp2(s)
# Skip this step if the principal branch
# does not exist at T0; this happens when a diagonal entry of T0
# is negative with imaginary part 0.
has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
if has_principal_branch:
# Recompute diagonal entries of U.
U[np.diag_indices(n)] = np.log(np.diag(T0))
# Recompute superdiagonal entries of U.
        # The indexing in this loop should be revisited
        # once a newer np.diagonal() becomes available.
for i in range(n-1):
l1 = T0[i, i]
l2 = T0[i+1, i+1]
t12 = T0[i, i+1]
U[i, i+1] = _logm_superdiag_entry(l1, l2, t12)
# Return the logm of the upper triangular matrix.
if not np.array_equal(U, np.triu(U)):
raise Exception('internal inconsistency')
return U
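# A hedged round-trip check (illustration only): exponentiating the computed
# logarithm of a well-behaved upper triangular matrix should recover the
# original matrix.  scipy.linalg.expm is imported locally because this demo is
# not part of the original module; the function name is hypothetical.
def _demo_logm_triu_roundtrip():
    from scipy.linalg import expm
    T = np.array([[2.0, 1.0, 0.0],
                  [0.0, 3.0, 1.0],
                  [0.0, 0.0, 4.0]])
    return np.allclose(expm(_logm_triu(T)), T)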
def _logm_force_nonsingular_triangular_matrix(T, inplace=False):
# The input matrix should be upper triangular.
# The eps is ad hoc and is not meant to be machine precision.
tri_eps = 1e-20
abs_diag = np.absolute(np.diag(T))
if np.any(abs_diag == 0):
exact_singularity_msg = 'The logm input matrix is exactly singular.'
warnings.warn(exact_singularity_msg, LogmExactlySingularWarning)
if not inplace:
T = T.copy()
n = T.shape[0]
for i in range(n):
if not T[i, i]:
T[i, i] = tri_eps
elif np.any(abs_diag < tri_eps):
near_singularity_msg = 'The logm input matrix may be nearly singular.'
warnings.warn(near_singularity_msg, LogmNearlySingularWarning)
return T
def _logm(A):
"""
Compute the matrix logarithm.
See the logm docstring in matfuncs.py for more info.
Notes
-----
In this function we look at triangular matrices that are similar
to the input matrix. If any diagonal entry of such a triangular matrix
is exactly zero then the original matrix is singular.
The matrix logarithm does not exist for such matrices,
but in such cases we will pretend that the diagonal entries that are zero
are actually slightly positive by an ad-hoc amount, in the interest
of returning something more useful than NaN. This will cause a warning.
"""
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
n = A.shape[0]
# If the input matrix dtype is integer then copy to a float dtype matrix.
if issubclass(A.dtype.type, np.integer):
A = np.asarray(A, dtype=float)
keep_it_real = np.isrealobj(A)
try:
if np.array_equal(A, np.triu(A)):
A = _logm_force_nonsingular_triangular_matrix(A)
if np.min(np.diag(A)) < 0:
A = A.astype(complex)
return _logm_triu(A)
else:
if keep_it_real:
T, Z = schur(A)
if not np.array_equal(T, np.triu(T)):
T, Z = rsf2csf(T,Z)
else:
T, Z = schur(A, output='complex')
T = _logm_force_nonsingular_triangular_matrix(T, inplace=True)
U = _logm_triu(T)
ZH = np.conjugate(Z).T
return Z.dot(U).dot(ZH)
except (SqrtmError, LogmError) as e:
X = np.empty_like(A)
X.fill(np.nan)
return X
|
import logging
import flask
import simplejson as json
import config
import mirroring
import signals
import storage
import toolkit
from .app import app
store = storage.load()
logger = logging.getLogger(__name__)
"""Those routes are loaded only when `standalone' is enabled in the config
file. The goal is to make the Registry working without the central Index
It's then possible to push images from Docker without talking to any other
entities. This module mimics the Index.
"""
def get_endpoints(cfg=None):
if not cfg:
cfg = config.load()
registry_endpoints = cfg.registry_endpoints
if not registry_endpoints:
#registry_endpoints = socket.gethostname()
registry_endpoints = flask.request.environ['HTTP_HOST']
return registry_endpoints
def generate_headers(namespace, repository, access):
registry_endpoints = get_endpoints()
    # The generated token will not be valid against a real central Index
    # sitting behind this registry.
token = 'Token signature={0},repository="{1}/{2}",access={3}'.format(
toolkit.gen_random_string(), namespace, repository, access)
return {'X-Docker-Endpoints': registry_endpoints,
'WWW-Authenticate': token,
'X-Docker-Token': token}
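# Example of the headers produced above (all values are purely illustrative):
#   X-Docker-Endpoints: registry.example.com
#   WWW-Authenticate: Token signature=8g2fs91xkcm3,repository="library/ubuntu",access=write
#   X-Docker-Token: Token signature=8g2fs91xkcm3,repository="library/ubuntu",access=write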
@app.route('/v1/users', methods=['GET', 'POST'])
@app.route('/v1/users/', methods=['GET', 'POST'])
def get_post_users():
if flask.request.method == 'GET':
return toolkit.response('OK', 200)
try:
json.loads(flask.request.data)
except json.JSONDecodeError:
return toolkit.api_error('Error Decoding JSON', 400)
return toolkit.response('User Created', 201)
@app.route('/v1/users/<username>/', methods=['PUT'])
def put_username(username):
return toolkit.response('', 204)
def update_index_images(namespace, repository, data):
path = store.index_images_path(namespace, repository)
sender = flask.current_app._get_current_object()
try:
images = {}
data = json.loads(data) + json.loads(store.get_content(path))
for i in data:
iid = i['id']
if iid in images and 'checksum' in images[iid]:
continue
i_data = {'id': iid}
for key in ['checksum']:
if key in i:
i_data[key] = i[key]
images[iid] = i_data
data = images.values()
store.put_content(path, json.dumps(data))
signals.repository_updated.send(
sender, namespace=namespace, repository=repository, value=data)
except IOError:
signals.repository_created.send(
sender, namespace=namespace, repository=repository,
value=json.loads(data))
store.put_content(path, data)
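# A hedged illustration of the merge rule implemented above (not used by any
# route): entries from the incoming payload are considered first, and an entry
# that already carries a checksum is never overwritten by a later duplicate,
# so a stored checksum survives only when the incoming entry has none.
# The helper name is hypothetical and exists for documentation purposes only.
def _merge_image_lists_example(new_images, stored_images):
    images = {}
    for i in new_images + stored_images:
        iid = i['id']
        if iid in images and 'checksum' in images[iid]:
            continue
        i_data = {'id': iid}
        if 'checksum' in i:
            i_data['checksum'] = i['checksum']
        images[iid] = i_data
    return list(images.values())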
@app.route('/v1/repositories/<path:repository>', methods=['PUT'])
@app.route('/v1/repositories/<path:repository>/images',
defaults={'images': True},
methods=['PUT'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def put_repository(namespace, repository, images=False):
data = None
try:
data = json.loads(flask.request.data)
except json.JSONDecodeError:
return toolkit.api_error('Error Decoding JSON', 400)
if not isinstance(data, list):
return toolkit.api_error('Invalid data')
update_index_images(namespace, repository, flask.request.data)
headers = generate_headers(namespace, repository, 'write')
code = 204 if images is True else 200
return toolkit.response('', code, headers)
@app.route('/v1/repositories/<path:repository>/images', methods=['GET'])
@toolkit.parse_repository_name
@toolkit.requires_auth
@mirroring.source_lookup(index_route=True)
def get_repository_images(namespace, repository):
data = None
try:
path = store.index_images_path(namespace, repository)
data = store.get_content(path)
except IOError:
return toolkit.api_error('images not found', 404)
headers = generate_headers(namespace, repository, 'read')
return toolkit.response(data, 200, headers, True)
@app.route('/v1/repositories/<path:repository>/images', methods=['DELETE'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def delete_repository_images(namespace, repository):
    # Does nothing; the images index file is removed when the repository
    # itself is deleted.
headers = generate_headers(namespace, repository, 'delete')
return toolkit.response('', 204, headers)
@app.route('/v1/repositories/<path:repository>/auth', methods=['PUT'])
@toolkit.parse_repository_name
def put_repository_auth(namespace, repository):
return toolkit.response('OK')
@app.route('/v1/search', methods=['GET'])
def get_search():
return toolkit.response({})
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
from __future__ import division
import pcbnew
import HelpfulFootprintWizardPlugin
import PadArray as PA
class QFPWizard(HelpfulFootprintWizardPlugin.HelpfulFootprintWizardPlugin):
def GetName(self):
return "QFP"
def GetDescription(self):
return "Quad Flat Package footprint wizard"
def GenerateParameterList(self):
self.AddParam("Pads", "n", self.uNatural, 100)
self.AddParam("Pads", "pad pitch", self.uMM, 0.5)
self.AddParam("Pads", "pad width", self.uMM, 0.25)
self.AddParam("Pads", "pad length", self.uMM, 1.5)
self.AddParam("Pads", "vertical pitch", self.uMM, 15)
self.AddParam("Pads", "horizontal pitch", self.uMM, 15)
self.AddParam("Pads", "oval", self.uBool, True)
self.AddParam("Package", "package width", self.uMM, 14)
self.AddParam("Package", "package height", self.uMM, 14)
self.AddParam("Package", "courtyard margin", self.uMM, 1)
def CheckParameters(self):
self.CheckParamInt("Pads", "*n", is_multiple_of=4)
self.CheckParamBool("Pads", "*oval")
def GetValue(self):
return "QFP_%d" % self.parameters["Pads"]["*n"]
def BuildThisFootprint(self):
pads = self.parameters["Pads"]
pad_pitch = pads["pad pitch"]
pad_length = self.parameters["Pads"]["pad length"]
pad_width = self.parameters["Pads"]["pad width"]
v_pitch = pads["vertical pitch"]
h_pitch = pads["horizontal pitch"]
pads_per_row = pads["*n"] // 4
row_len = (pads_per_row - 1) * pad_pitch
pad_shape = pcbnew.PAD_SHAPE_OVAL if pads["*oval"] else pcbnew.PAD_SHAPE_RECT
        h_pad = PA.PadMaker(self.module).SMDPad(pad_length, pad_width,
                                                shape=pad_shape, rot_degree=90.0)
        v_pad = PA.PadMaker(self.module).SMDPad(pad_length, pad_width,
                                                shape=pad_shape)
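        # The four PadLineArray calls below place one quarter of the pads on
        # each side; numbering starts on the left column (SetFirstPadInArray(1))
        # and continues with the bottom, right and top rows.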
#left row
pin1Pos = pcbnew.wxPoint(-h_pitch / 2, 0)
array = PA.PadLineArray(h_pad, pads_per_row, pad_pitch, True, pin1Pos)
array.SetFirstPadInArray(1)
array.AddPadsToModule(self.draw)
#bottom row
pin1Pos = pcbnew.wxPoint(0, v_pitch / 2)
array = PA.PadLineArray(v_pad, pads_per_row, pad_pitch, False, pin1Pos)
array.SetFirstPadInArray(pads_per_row + 1)
array.AddPadsToModule(self.draw)
#right row
pin1Pos = pcbnew.wxPoint(h_pitch / 2, 0)
array = PA.PadLineArray(h_pad, pads_per_row, -pad_pitch, True,
pin1Pos)
array.SetFirstPadInArray(2*pads_per_row + 1)
array.AddPadsToModule(self.draw)
#top row
pin1Pos = pcbnew.wxPoint(0, -v_pitch / 2)
array = PA.PadLineArray(v_pad, pads_per_row, -pad_pitch, False,
pin1Pos)
array.SetFirstPadInArray(3*pads_per_row + 1)
array.AddPadsToModule(self.draw)
lim_x = self.parameters["Package"]["package width"] / 2
lim_y = self.parameters["Package"]["package height"] / 2
inner = (row_len / 2) + pad_pitch
#top left - diagonal
self.draw.Line(-lim_x, -inner, -inner, -lim_y)
# top right
self.draw.Polyline([(inner, -lim_y), (lim_x, -lim_y), (lim_x, -inner)])
# bottom left
self.draw.Polyline([(-inner, lim_y), (-lim_x, lim_y), (-lim_x, inner)])
# bottom right
self.draw.Polyline([(inner, lim_y), (lim_x, lim_y), (lim_x, inner)])
# Courtyard
cmargin = self.parameters["Package"]["courtyard margin"]
self.draw.SetLayer(pcbnew.F_CrtYd)
sizex = (lim_x + cmargin) * 2 + pad_length
sizey = (lim_y + cmargin) * 2 + pad_length
# set courtyard line thickness to the one defined in KLC
thick = self.draw.GetLineTickness()
self.draw.SetLineTickness(pcbnew.FromMM(0.05))
self.draw.Box(0, 0, sizex, sizey)
        # restore line thickness to the value saved above
        # (already in the units expected by SetLineTickness, so no FromMM here)
        self.draw.SetLineTickness(thick)
#reference and value
text_size = self.GetTextSize() # IPC nominal
text_offset = v_pitch / 2 + text_size + pad_length / 2
self.draw.Value(0, text_offset, text_size)
self.draw.Reference(0, -text_offset, text_size)
QFPWizard().register()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import functools
import itertools
import math
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.test_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.test_session(use_gpu=True):
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = rgb.eval()
self.assertAllClose(rgb_tf, rgb_np)
class RGBToYIQTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YIQ and back, as a batch and individually
with self.test_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yiq(batch0)
batch2 = image_ops.yiq_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yiq, split0))
split2 = list(map(image_ops.yiq_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class RGBToYUVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YUV and back, as a batch and individually
with self.test_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yuv(batch0)
batch2 = image_ops.yuv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yuv, split0))
split2 = list(map(image_ops.yuv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in xrange(images.shape[0]):
for y in xrange(images.shape[1]):
for x in xrange(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testShapeInference(self):
# Shape inference works and produces expected output where possible
rgb_shape = [7, None, 19, 3]
gray_shape = rgb_shape[:-1] + [1]
with self.test_session(use_gpu=True):
rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
gray = image_ops.rgb_to_grayscale(rgb_tf)
self.assertEqual(gray_shape, gray.get_shape().as_list())
with self.test_session(use_gpu=True):
gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
rgb = image_ops.grayscale_to_rgb(gray_tf)
self.assertEqual(rgb_shape, rgb.get_shape().as_list())
# Shape inference does not break for unknown shapes
with self.test_session(use_gpu=True):
rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
self.assertFalse(gray_unknown.get_shape())
with self.test_session(use_gpu=True):
gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
self.assertFalse(rgb_unknown.get_shape())
class AdjustGamma(test_util.TensorFlowTestCase):
def test_adjust_gamma_one(self):
"""Same image should be returned for gamma equal to one"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=1)
y_tf = y.eval()
y_np = x_np
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_less_zero(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number."
try:
image_ops.adjust_gamma(x, gamma=-1)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def test_adjust_gamma_less_zero_tensor(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = constant_op.constant(-1.0, dtype=dtypes.float32)
image = image_ops.adjust_gamma(x, gamma=y)
err_msg = "Gamma should be a non-negative real number."
try:
image.eval()
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def test_adjust_gamma_zero(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=0)
y_tf = y.eval()
dtype = x.dtype.as_numpy_dtype
y_np = np.array([dtypes.dtype_range[dtype][1]] * x_np.size)
y_np = y_np.reshape((8, 8))
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_less_one(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half"""
with self.cached_session():
x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)
y = image_ops.adjust_gamma(x_np, gamma=0.5)
y_tf = np.trunc(y.eval())
y_np = np.array(
[[0, 31, 45, 55, 63, 71, 78, 84], [
90, 95, 100, 105, 110, 115, 119, 123
], [127, 131, 135, 139, 142, 146, 149, 153], [
156, 159, 162, 165, 168, 171, 174, 177
], [180, 183, 186, 188, 191, 194, 196, 199], [
201, 204, 206, 209, 211, 214, 216, 218
], [221, 223, 225, 228, 230, 232, 234, 236],
[238, 241, 243, 245, 247, 249, 251, 253]],
dtype=np.float32)
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_greater_one(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to two"""
with self.cached_session():
x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)
y = image_ops.adjust_gamma(x_np, gamma=2)
y_tf = np.trunc(y.eval())
y_np = np.array(
[[0, 0, 0, 0, 1, 1, 2, 3], [4, 5, 6, 7, 9, 10, 12, 14], [
16, 18, 20, 22, 25, 27, 30, 33
], [36, 39, 42, 45, 49, 52, 56, 60], [64, 68, 72, 76, 81, 85, 90, 95],
[100, 105, 110, 116, 121, 127, 132, 138], [
144, 150, 156, 163, 169, 176, 182, 189
], [196, 203, 211, 218, 225, 233, 241, 249]],
dtype=np.float32)
self.assertAllClose(y_tf, y_np, 1e-6)
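# A hedged reference for the expected tables in AdjustGamma above (illustration
# only, not used by the tests): for uint8 input x and gain 1, adjust_gamma
# appears to map x to 255 * (x / 255) ** gamma, and the expected arrays are the
# truncated values of that map.  The helper name below is hypothetical.
def _reference_adjust_gamma_uint8(x, gamma):
  return np.trunc(255.0 * (np.asarray(x, dtype=np.float32) / 255.0) ** gamma)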
class AdjustHueTest(test_util.TensorFlowTestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_hue(x, delta_h)
y_tf = y.eval()
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
class FlipImageBenchmark(test.Benchmark):
def _benchmarkFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.flip_left_right(inputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkRandomFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkRandomFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count):
image_shape = [16, 299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: "
"%.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkFlipLeftRightCpu1(self):
self._benchmarkFlipLeftRight("/cpu:0", 1)
def benchmarkFlipLeftRightCpuAll(self):
self._benchmarkFlipLeftRight("/cpu:0", None)
def benchmarkFlipLeftRightGpu(self):
self._benchmarkFlipLeftRight(test.gpu_device_name(), None)
def benchmarkRandomFlipLeftRightCpu1(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", 1)
def benchmarkRandomFlipLeftRightCpuAll(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", None)
def benchmarkRandomFlipLeftRightGpu(self):
self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)
def benchmarkBatchedRandomFlipLeftRightCpu1(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", 1)
def benchmarkBatchedRandomFlipLeftRightCpuAll(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", None)
def benchmarkBatchedRandomFlipLeftRightGpu(self):
self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None)
class AdjustHueBenchmark(test.Benchmark):
def _benchmarkAdjustHue(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_hue(inputs, delta)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustHue_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustHueCpu1(self):
self._benchmarkAdjustHue("/cpu:0", 1)
def benchmarkAdjustHueCpuAll(self):
self._benchmarkAdjustHue("/cpu:0", None)
def benchmarkAdjustHueGpu(self):
self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
def _benchmarkAdjustSaturation(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_saturation(inputs, delta)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for _ in xrange(warmup_rounds):
sess.run(run_op)
start = time.time()
for _ in xrange(benchmark_rounds):
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustSaturation_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustSaturationCpu1(self):
self._benchmarkAdjustSaturation("/cpu:0", 1)
def benchmarkAdjustSaturationCpuAll(self):
self._benchmarkAdjustSaturation("/cpu:0", None)
def benchmarkAdjustSaturationGpu(self):
self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bilinear(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class ResizeBicubicBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bicubic(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
min_iters=20,
name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
def benchmarkSimilar4Channel(self):
self._benchmarkResize((183, 229), 4)
def benchmarkScaleUp4Channel(self):
self._benchmarkResize((141, 186), 4)
def benchmarkScaleDown4Channel(self):
self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testBatchSaturation(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def _adjust_saturation(self, image, saturation_factor):
image = ops.convert_to_tensor(image, name="image")
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
saturation_adjusted_image = gen_image_ops.adjust_saturation(
flt_image, saturation_factor)
return image_ops.convert_image_dtype(saturation_adjusted_image, orig_dtype)
def testHalfSaturationFused(self):
x_shape = [2, 2, 3]
x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturationFused(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.test_session(use_gpu=True):
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
y_fused = self._adjust_saturation(x_np, scale).eval()
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class FlipTransposeRotateTest(test_util.TensorFlowTestCase):
def testInvolutionLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testInvolutionLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
self.assertTrue(y.op.name.startswith("flip_left_right"))
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testRandomFlipLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = y.eval()
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipLeftRightWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [3, 2, 1]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = y.eval()
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
# 100 trials, each containing batch_size elements
# Mean: 50 * batch_size
# Std Dev: ~5 * sqrt(batch_size)
# Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))
# = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680
six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)
self.assertGreaterEqual(count_flipped, six_sigma)
self.assertGreaterEqual(count_unflipped, six_sigma)
def testInvolutionUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testInvolutionUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
self.assertTrue(y.op.name.startswith("flip_up_down"))
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testRandomFlipUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = y.eval()
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipUpDownWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = y.eval()
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
# 100 trials, each containing batch_size elements
# Mean: 50 * batch_size
# Std Dev: ~5 * sqrt(batch_size)
# Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))
# = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680
six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)
self.assertGreaterEqual(count_flipped, six_sigma)
self.assertGreaterEqual(count_unflipped, six_sigma)
def testInvolutionTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(image_ops.transpose_image(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testInvolutionTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(image_ops.transpose_image(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(x_tf)
self.assertTrue(y.op.name.startswith("transpose_image"))
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
dtype=np.uint8).reshape([2, 3, 2, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testPartialShapes(self):
p_unknown_rank = array_ops.placeholder(dtypes.uint8)
p_unknown_dims_3 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None])
p_unknown_dims_4 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None, None])
p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
p_unknown_batch = array_ops.placeholder(
dtypes.uint8, shape=[None, 64, 64, 3])
p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
#Ops that support 3D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose_image, image_ops.rot90
]:
transformed_unknown_rank = op(p_unknown_rank)
self.assertEqual(3, transformed_unknown_rank.get_shape().ndims)
transformed_unknown_dims_3 = op(p_unknown_dims_3)
self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)
transformed_unknown_width = op(p_unknown_width)
self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
with self.assertRaisesRegexp(ValueError, "must be > 0"):
op(p_zero_dim)
#Ops that support 4D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose_image, image_ops.rot90
]:
transformed_unknown_dims_4 = op(p_unknown_dims_4)
self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
transformed_unknown_batch = op(p_unknown_batch)
self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
with self.assertRaisesRegexp(ValueError,
"must be at least three-dimensional"):
op(p_wrong_rank)
def testRot90GroupOrder(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.test_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, rotated.eval())
def testRot90GroupOrderWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.test_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, rotated.eval())
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.test_session(use_gpu=True):
k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
y_tf = image_ops.rot90(image, k_placeholder)
for k in xrange(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
def testRot90NumpyEquivalenceWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.test_session(use_gpu=True):
k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
y_tf = image_ops.rot90(image, k_placeholder)
for k in xrange(4):
y_np = np.rot90(image, k=k, axes=(1, 2))
self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, 1e-6)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.float64).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
    y_np = np.array(y_data, dtype=np.float64).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=0.5)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
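    # NumPy reference: blend each pixel toward the per-channel mean computed
    # over the spatial dimensions of each image.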
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval()
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
def testContrastFactorShape(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
with self.assertRaisesRegexp(
ValueError, 'Shape must be rank 0 but is rank 1'):
image_ops.adjust_contrast(x_np, [2.0])
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x, delta)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, 1e-6)
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10. / 255.)
class PerImageWhiteningTest(test_util.TensorFlowTestCase):
def _NumpyPerImageWhitening(self, x):
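    # NumPy reference for per_image_standardization: subtract the mean and
    # divide by max(stddev, 1/sqrt(num_pixels)) so uniform images do not
    # divide by zero.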
num_pixels = np.prod(x.shape)
x2 = np.square(x).astype(np.float32)
mn = np.mean(x)
vr = np.mean(x2) - (mn * mn)
stddev = max(math.sqrt(vr), 1.0 / math.sqrt(num_pixels))
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
def testBasic(self):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=np.int32).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.per_image_standardization(x)
self.assertTrue(y.op.name.startswith("per_image_standardization"))
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, atol=1e-4)
def testUniformImage(self):
im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
im = constant_op.constant(im_np)
whiten = image_ops.per_image_standardization(im)
with self.test_session(use_gpu=True):
whiten_np = whiten.eval()
self.assertFalse(np.any(np.isnan(whiten_np)))
def testBatchWhitening(self):
imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3])
whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np]
with self.test_session(use_gpu=True):
imgs = constant_op.constant(imgs_np)
whiten = image_ops.per_image_standardization(imgs)
whiten_tf = whiten.eval()
for w_tf, w_np in zip(whiten_tf, whiten_np):
self.assertAllClose(w_tf, w_np, atol=1e-4)
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
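    # With use_tensor_inputs, offsets and sizes are passed as tensors and the
    # image is fed through a placeholder, so no static shape is available.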
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._CropToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, 0, 0, x, x_shape)
def testCrop(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y_shape = [2, 3, 1]
y = [4, 5, 6, 7, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y_shape = [3, 2, 1]
y = [2, 3, 5, 6, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [2, 3, 1]
y = [1, 2, 3, 4, 5, 6]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [3, 2, 1]
y = [1, 2, 4, 5, 7, 8]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # Multiple assertions could fail, but the evaluation order is arbitrary.
      # Match against a generic pattern.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"assertion failed:",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# (offset_height, offset_width, target_height, target_width), err_msg
    test_config = (([-1, 0, 3, 3], "offset_height must be >= 0"),
                   ([0, -1, 3, 3], "offset_width must be >= 0"),
                   ([0, 0, 0, 3], "target_height must be > 0"),
                   ([0, 0, 3, 0], "target_width must be > 0"),
                   ([2, 0, 3, 3], "height must be >= target + offset"),
                   ([0, 2, 3, 3], "width must be >= target + offset"))
for params, err_msg in test_config:
self._assertRaises(x, x_shape, *params, err_msg=err_msg)
def testNameScope(self):
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.name.startswith("crop_to_bounding_box"))
class CentralCropTest(test_util.TensorFlowTestCase):
def _assertShapeInference(self, pre_shape, fraction, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.central_crop(image, fraction)
if post_shape is None:
self.assertEqual(y.get_shape().dims, None)
else:
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shapes = [[13, 9, 3], [5, 13, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 1.0)
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
self.assertEqual(y.op.name, x.op.name)
def testCropping(self):
x_shape = [4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
x_shape = [2, 4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],
[[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testCropping2(self):
# Test case for 10315
x_shapes = [[240, 320, 3], [5, 240, 320, 3]]
expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]
for x_shape, y_shape in zip(x_shapes, expected_y_shapes):
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x = array_ops.placeholder(shape=x_shape, dtype=dtypes.int32)
y = image_ops.central_crop(x, 0.33)
y_tf = y.eval(feed_dict={x: x_np})
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testShapeInference(self):
# Test no-op fraction=1.0, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
self._assertShapeInference([None, None, None], 1.0, [None, None, None])
    # Test fraction=0.5, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])
self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])
self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])
self._assertShapeInference([None, None, None], 0.5, [None, None, None])
# Test no-op fraction=1.0, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])
self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])
self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])
self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])
self._assertShapeInference([5, None, None, None], 1.0,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 1.0,
[None, None, None, None])
    # Test fraction=0.5, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])
self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])
self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])
self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])
self._assertShapeInference([5, None, None, None], 0.5,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 0.5,
[None, None, None, None])
def testErrorOnInvalidCentralCropFractionValues(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.0)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 1.01)
def testErrorOnInvalidShapes(self):
x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.5)
def testNameScope(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
y = image_ops.central_crop(x_np, 1.0)
self.assertTrue(y.op.name.startswith("central_crop"))
class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
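    # Same dual-path setup as CropToBoundingBoxTest: tensor inputs are fed via
    # a placeholder so that the static shape is unknown.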
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._PadToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testInt64(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])
with self.test_session(use_gpu=True):
self.assertAllClose(y, y_tf.eval())
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
def testPadding(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # The original error message does not contain backslashes. However, they
      # are added by either the assert op or the runtime. If this behavior
      # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = ((-1, 0, 4, 4, "offset_height must be >= 0"),
(0, -1, 4, 4, "offset_width must be >= 0"),
(2, 0, 4, 4, "height must be <= target - offset"),
(0, 2, 4, 4, "width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(x, x_shape, *config_item)
def testNameScope(self):
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
def _testSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered, aspect_ratio_range,
area_range):
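    # Draw num_iter random crops and record the aspect ratio, relative area,
    # and fraction of the bounding box covered by each crop; the statistics
    # are checked loosely below.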
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (
np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
num_iter = 1000
with self.test_session(use_gpu=True):
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(
bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = y.eval()
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# min_object_covered as tensor
min_object_covered_placeholder = array_ops.placeholder(dtypes.float32)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered_placeholder,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = y.eval(feed_dict={
min_object_covered_placeholder: min_object_covered
})
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Ensure that each entry is observed within 3 standard deviations.
# num_bins = 10
# aspect_ratio_hist, _ = np.histogram(aspect_ratios,
# bins=num_bins,
# range=aspect_ratio_range)
# mean = np.mean(aspect_ratio_hist)
# stddev = np.sqrt(mean)
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# TODO(irving): Since the rejection probability is not independent of the
# aspect ratio, the aspect_ratio random value is not exactly uniformly
# distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
# fixed to reflect the true statistical property, then tightened to enforce
    # a stricter bound. Or, ideally, the sample_distorted_bounding_box Op should
    # be fixed to not use rejection sampling and generate correctly uniform
    # aspect ratios.
# self.assertAllClose(aspect_ratio_hist,
# [mean] * num_bins, atol=3.6 * stddev)
# The resulting crop will not be uniformly distributed in area. In practice,
# we find that the area skews towards the small sizes. Instead, we perform
# a weaker test to ensure that the area ratios are merely within the
# specified bounds.
self.assertLessEqual(max(area_ratios), area_range[1])
self.assertGreaterEqual(min(area_ratios), area_range[0])
    # For reference, here is what the distribution of area ratios looks like.
area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
print("area_ratio_hist ", area_ratio_hist)
# Ensure that fraction_object_covered is satisfied.
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
def testWholeImageBoundingBox(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
self._testSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testWithBoundingBox(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
    # Create an object with 1's in a region with area A and require that
    # the sum of pixel values in the crop is >= 0.1 * A.
min_object_covered = 0.1
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
self._testSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_object_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testSampleDistortedBoundingBoxShape(self):
with self.test_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = begin.eval()
end = end.eval()
bbox_for_drawing = bbox_for_drawing.eval()
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=array_ops.placeholder(dtypes.float32),
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
def testDefaultMinObjectCovered(self):
    # min_object_covered defaults to 0.1 when not provided.
with self.test_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = begin.eval()
end = end.eval()
bbox_for_drawing = bbox_for_drawing.eval()
class ResizeImagesTest(test_util.TensorFlowTestCase):
OPTIONS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, opt, nptype):
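    # Only nearest-neighbor resize with float32/float64 inputs is run on the
    # GPU in these tests.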
if (opt == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width], opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing a single image must also leave the shape unchanged.
with self.test_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, [target_height, target_width],
self.OPTIONS[0])
yshape = array_ops.shape(y)
newshape = yshape.eval()
self.assertAllEqual(single_shape, newshape)
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
new_size = array_ops.placeholder(dtypes.int32, shape=(2))
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, new_size, opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing a single image must also leave the shape unchanged.
    with self.test_session(use_gpu=True) as sess:
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, new_size, self.OPTIONS[0])
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(single_shape, newshape)
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, 4.0],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [None, 4],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, None],
image_ops.ResizeMethod.BILINEAR)
def testReturnDtype(self):
target_shapes = [[6, 4], [3, 2], [
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for opt in self.OPTIONS:
for target_shape in target_shapes:
y = image_ops.resize_images(image, target_shape, opt)
if (opt == image_ops.ResizeMethod.NEAREST_NEIGHBOR or
target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
    # Test a target size whose width is specified as the sum of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for opt in self.OPTIONS:
with self.cached_session() as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [height, width], opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for opt in self.OPTIONS:
if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
opt)
expected = np.array(expected_data).reshape(target_shape)
resized = y.eval()
self.assertAllClose(resized, expected, atol=1e-5)
def testResizeUpAlignCornersFalse(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,
41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
for nptype in self.TYPES:
for opt in [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.AREA
]:
with self.test_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], opt, align_corners=False)
resized = y.eval()
expected = np.array(expected_data[opt]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpAlignCornersTrue(self):
img_shape = [1, 3, 2, 1]
data = [6, 3, 3, 6, 6, 9]
target_height = 5
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,
6.5, 7.5, 6.0, 7.0, 8.0, 9.0
]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,
9.0, 9.0, 6.0, 6.0, 9.0, 9.0
]
# TODO(b/37749740): Improve alignment of ResizeMethod.AREA when
# align_corners=True.
expected_data[image_ops.ResizeMethod.AREA] = [
6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,
3.0, 6.0, 6.0, 6.0, 6.0, 9.0
]
for nptype in self.TYPES:
for opt in [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.AREA
]:
with self.test_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], opt, align_corners=True)
resized = y.eval()
expected = np.array(expected_data[opt]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [
128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [
128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,
105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,
75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105
]
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethod.BICUBIC)
resized = y.eval()
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = y.eval()
self.assertAllClose(resized, expected, atol=1)
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
gpu_val = out_op.eval()
with self.test_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
cpu_val = out_op.eval()
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.BILINEAR,
align_corners=align_corners)
value[use_gpu] = out_op.eval()
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
def testNameScope(self):
img_shape = [1, 3, 2, 1]
with self.test_session(use_gpu=True):
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize_images"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
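    # With preserve_aspect_ratio=True, (max_h, max_w) acts as a bounding size:
    # the image is scaled to fit within it while keeping its aspect ratio.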
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
target_max = [max_h, max_w]
x_tensor = x
feed_dict = {}
y = image_ops.resize_images(x_tensor, target_max,
preserve_aspect_ratio=preserve_aspect_ratio)
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertResizeEqual(self, x, x_shape, y, y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self, x, x_shape, target_shape,
y_shape, preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10],
preserve_aspect_ratio=False)
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
class ResizeImageWithPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
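    # resize_image_with_pad scales the image to fit the target while keeping
    # its aspect ratio, then pads evenly with zeros to the exact target size.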
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_pad(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e: # pylint: disable=broad-except
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 3, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_crop_or_pad(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_crop_or_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Pad even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]
y_shape = [2, 6, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0]
y_shape = [2, 7, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]
y_shape = [4, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
y_shape = [5, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCrop(self):
# Crop even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [2, 3, 6, 7]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x_shape = [2, 6, 1]
y = [2, 3, 4, 8, 9, 10]
y_shape = [2, 3, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [3, 4, 5, 6]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
x_shape = [8, 2, 1]
y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
y_shape = [5, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCropAndPad(self):
# Pad along row but crop along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 2, 3, 6, 7, 0, 0]
y_shape = [4, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop along row but pad along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [0, 3, 4, 0, 0, 5, 6, 0]
y_shape = [2, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
target_height, target_width = [4, 4]
for x_shape in ([3, 5],):
self._assertRaises(x, x_shape, target_height, target_width,
"'image' must have either 3 or 4 dimensions.")
for x_shape in ([1, 3, 5, 1, 1],):
self._assertRaises(x, x_shape, target_height, target_width,
"'image' must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
target_height, target_width = [1, 1]
x = []
for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # The original error message does not contain backslashes. However, they
      # are added by either the assert op or the runtime. If this behavior
      # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# target_height <= 0
target_height, target_width = [0, 5]
self._assertRaises(x, x_shape, target_height, target_width,
"target_height must be > 0")
# target_width <= 0
target_height, target_width = [5, 0]
self._assertRaises(x, x_shape, target_height, target_width,
"target_width must be > 0")
def testNameScope(self):
image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)
self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad"))
def _SimpleColorRamp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
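  # Red increases down the rows, green increases across the columns, and blue
  # is their average (right-shifted by one bit).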
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.test_session(use_gpu=True) as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = sess.run([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 1.4)
def testCmyk(self):
# Confirm that CMYK reads in as RGB
base = "tensorflow/core/lib/jpeg/testdata"
rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
shape = 256, 128, 3
for channels in 3, 0:
with self.test_session(use_gpu=True) as sess:
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
io_ops.read_file(cmyk_path), channels=channels)
rgb, cmyk = sess.run([rgb, cmyk])
self.assertEqual(rgb.shape, shape)
self.assertEqual(cmyk.shape, shape)
error = self.averageError(rgb, cmyk)
self.assertLess(error, 4)
def testCropAndDecodeJpeg(self):
with self.cached_session() as sess:
      # Read a real jpeg, then decode+crop it in two ways and compare.
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],
[h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]
for crop_window in crop_windows:
# Explicit two stages: decode + crop.
image1 = image_ops.decode_jpeg(jpeg0)
y, x, h, w = crop_window
image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)
# Combined decode+crop.
image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
# Combined decode+crop should have the same shape inference
self.assertAllEqual(image1_crop.get_shape().as_list(),
image2.get_shape().as_list())
# CropAndDecode should be equal to DecodeJpeg+Crop.
image1_crop, image2 = sess.run([image1_crop, image2])
self.assertAllEqual(image1_crop, image2)
def testCropAndDecodeJpegWithInvalidCropWindow(self):
with self.cached_session() as sess:
      # Read a real jpeg; each crop window below is invalid and should raise.
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
# Invalid crop windows.
crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],
[11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],
[0, 0, h + 1, w], [0, 0, h, w + 1]]
for crop_window in crop_windows:
result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
lambda e: "Invalid JPEG data or crop window" in str(e)):
sess.run(result)
def testSynthetic(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input, but
# note this is worse than the slower algorithm because it is
# less accurate.
self.assertLess(self.averageError(image0, image1), 0.95)
# Repeated compression / decompression will have a higher error
# with a lossier algorithm.
self.assertLess(self.averageError(image1, image2), 1.05)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
with self.test_session(use_gpu=True) as sess:
      # Compare decoding with dct_method=INTEGER_FAST and with the default;
      # the results should be the same.
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(jpeg0)
image1, image2 = sess.run([image1, image2])
# The images should be the same.
self.assertAllClose(image1, image2)
def testShape(self):
with self.test_session(use_gpu=True) as sess:
jpeg = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
def testExtractJpegShape(self):
# Read a real jpeg and verify shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.test_session(use_gpu=True) as sess:
jpeg = io_ops.read_file(path)
# Extract shape without decoding.
[image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
self.assertEqual(image_shape.tolist(), [256, 128, 3])
def testExtractJpegShapeforCmyk(self):
# Read a cmyk jpeg image, and verify its shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1_cmyk.jpg")
with self.test_session(use_gpu=True) as sess:
jpeg = io_ops.read_file(path)
[image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
# Cmyk jpeg image has 4 channels.
self.assertEqual(image_shape.tolist(), [256, 128, 4])
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = "tensorflow/core/lib/png/testdata/"
inputs = ((1, "lena_gray.png"), (4, "lena_rgba.png"),
(3, "lena_palette.png"), (4, "lena_palette_trns.png"))
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.test_session(use_gpu=True) as sess:
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = sess.run([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, image1.eval())
def testSynthetic(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = sess.run([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testSyntheticUint16(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp(), dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = sess.run([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 800)
self.assertLessEqual(len(png0), 1500)
def testSyntheticTwoChannel(self):
with self.test_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = sess.run([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testSyntheticTwoChannelUint16(self):
with self.test_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = sess.run([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testShape(self):
with self.test_session(use_gpu=True):
png = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class GifTest(test_util.TensorFlowTestCase):
def _testValid(self, filename):
# Read some real GIFs
prefix = "tensorflow/core/lib/gif/testdata/"
WIDTH = 20
HEIGHT = 40
STRIDE = 5
shape = (12, HEIGHT, WIDTH, 3)
with self.test_session(use_gpu=True) as sess:
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
gif0, image0 = sess.run([gif0, image0])
self.assertEqual(image0.shape, shape)
for frame_idx, frame in enumerate(image0):
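        # Ground truth: each frame is black except a STRIDE-pixel-wide white
        # band that first sweeps across the columns and, once start/end pass
        # WIDTH, sweeps down the rows.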
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * STRIDE
end = (frame_idx + 1) * STRIDE
print(frame_idx)
if end <= WIDTH:
gt[:, start:end, :] = 255
else:
start -= WIDTH
end -= WIDTH
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
def testValid(self):
self._testValid("scan.gif")
self._testValid("optimized.gif")
def testShape(self):
with self.test_session(use_gpu=True) as sess:
gif = constant_op.constant("nonsense")
image = image_ops.decode_gif(gif)
self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
class ConvertImageTest(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.test_session(use_gpu=True):
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y.eval(), y_np, atol=1e-5)
if output_dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
]:
y_saturate = image_ops.convert_image_dtype(
image, output_dtype, saturate=True)
self.assertTrue(y_saturate.dtype == output_dtype)
self.assertAllClose(y_saturate.eval(), y_np, atol=1e-5)
def testNoConvert(self):
# Make sure converting to the same data type creates only an identity op
with self.test_session(use_gpu=True):
image = constant_op.constant([1], dtype=dtypes.uint8)
image_ops.convert_image_dtype(image, dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
      self.assertEqual(y.op.type, "Identity")
      self.assertEqual(y.op.inputs[0], image)
def testConvertBetweenInteger(self):
    # Make sure converting between integer types scales appropriately.
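    # The expected values below suggest the scale factor between two integer
    # types is a power of two given by the difference in value bits (e.g.
    # uint8 -> int16 multiplies by 2**(15-8) == 128); this is an observation
    # about the test data, not a statement of the exact implementation.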
with self.test_session(use_gpu=True):
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])
def testConvertBetweenFloat(self):
    # Make sure converting between float types does nothing interesting.
with self.test_session(use_gpu=True):
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
with self.test_session(use_gpu=True):
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
def testConvertBetweenInt16AndInt8(self):
with self.test_session(use_gpu=True):
# uint8, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
# int8, uint16
self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
# int16, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])
self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])
class TotalVariationTest(test_util.TensorFlowTestCase):
"""Tests the function total_variation() in image_ops.
We test a few small handmade examples, as well as
some larger examples using an equivalent numpy
implementation of the total_variation() function.
We do NOT test for overflows and invalid / edge-case arguments.
"""
def _test(self, x_np, y_np):
"""Test that the TensorFlow implementation of
total_variation(x_np) calculates the values in y_np.
    Note that these may be floating-point numbers, so we only test for
    approximate equality within some narrow error bound.
"""
# Create a TensorFlow session.
with self.test_session(use_gpu=True):
# Add a constant to the TensorFlow graph that holds the input.
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# Add ops for calculating the total variation using TensorFlow.
y = image_ops.total_variation(images=x_tf)
# Run the TensorFlow session to calculate the result.
y_tf = y.eval()
# Assert that the results are as expected within
# some small error-bound in case they are float-values.
self.assertAllClose(y_tf, y_np)
def _total_variation_np(self, x_np):
"""Calculate the total variation of x_np using numpy.
This implements the same function as TensorFlow but
using numpy instead.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
dim = len(x_np.shape)
if dim == 3:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
      # Sum over all axes.
sum_axis = None
elif dim == 4:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
      # Only sum over the last 3 axes.
sum_axis = (1, 2, 3)
else:
      # Should not occur in this test code; fail loudly instead of hitting a
      # NameError on dif1/dif2 below.
      raise ValueError("x_np must have 3 or 4 dimensions")
tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
np.sum(np.abs(dif2), axis=sum_axis)
return tot_var
def _test_tensorflow_vs_numpy(self, x_np):
"""Test the TensorFlow implementation against a numpy implementation.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
# Calculate the y-values using the numpy implementation.
y_np = self._total_variation_np(x_np)
self._test(x_np, y_np)
def _generateArray(self, shape):
"""Generate an array of the given shape for use in testing.
The numbers are calculated as the cumulative sum, which
causes the difference between neighboring numbers to vary."""
# Flattened length of the array.
flat_len = np.prod(shape)
a = np.array(range(flat_len), dtype=int)
a = np.cumsum(a)
a = a.reshape(shape)
return a
def testTotalVariationNumpy(self):
"""Test the TensorFlow implementation against a numpy implementation.
The two implementations are very similar so it is possible that both
have the same bug, which would not be detected by this test. It is
therefore necessary to test with manually crafted data as well."""
# Generate a test-array.
# This is an 'image' with 100x80 pixels and 3 color channels.
a = self._generateArray(shape=(100, 80, 3))
# Test the TensorFlow implementation vs. numpy implementation.
# We use a numpy implementation to check the results that are
# calculated using TensorFlow are correct.
self._test_tensorflow_vs_numpy(a)
self._test_tensorflow_vs_numpy(a + 1)
self._test_tensorflow_vs_numpy(-a)
self._test_tensorflow_vs_numpy(1.1 * a)
# Expand to a 4-dim array.
b = a[np.newaxis, :]
# Combine several variations of the image into a single 4-dim array.
multi = np.vstack((b, b + 1, -b, 1.1 * b))
# Test that the TensorFlow function can also handle 4-dim arrays.
self._test_tensorflow_vs_numpy(multi)
def testTotalVariationHandmade(self):
"""Test the total variation for a few handmade examples."""
# We create an image that is 2x2 pixels with 3 color channels.
# The image is very small so we can check the result by hand.
# Red color channel.
# The following are the sum of absolute differences between the pixels.
# sum row dif = (4-1) + (7-2) = 3 + 5 = 8
# sum col dif = (2-1) + (7-4) = 1 + 3 = 4
r = [[1, 2], [4, 7]]
    # Green color channel.
# sum row dif = 18 + 29 = 47
# sum col dif = 7 + 18 = 25
g = [[11, 18], [29, 47]]
    # Blue color channel.
# sum row dif = 120 + 193 = 313
# sum col dif = 47 + 120 = 167
b = [[73, 120], [193, 313]]
# Combine the 3 color channels into a single 3-dim array.
# The shape is (2, 2, 3) corresponding to (height, width and color).
a = np.dstack((r, g, b))
# Total variation for this image.
# Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
tot_var = 564
# Calculate the total variation using TensorFlow and assert it is correct.
self._test(a, tot_var)
# If we add 1 to all pixel-values then the total variation is unchanged.
self._test(a + 1, tot_var)
# If we negate all pixel-values then the total variation is unchanged.
self._test(-a, tot_var)
# Scale the pixel-values by a float. This scales the total variation as well.
b = 1.1 * a
self._test(b, 1.1 * tot_var)
# Scale by another float.
c = 1.2 * a
self._test(c, 1.2 * tot_var)
# Combine these 3 images into a single array of shape (3, 2, 2, 3)
# where the first dimension is for the image-number.
multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))
# Check that TensorFlow correctly calculates the total variation
# for each image individually and returns the correct array.
self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
class FormatTest(test_util.TensorFlowTestCase):
def testFormats(self):
prefix = "tensorflow/core/lib"
paths = ("png/testdata/lena_gray.png", "jpeg/testdata/jpeg_merge_test1.jpg",
"gif/testdata/lena.gif")
decoders = {
"jpeg": functools.partial(image_ops.decode_jpeg, channels=3),
"png": functools.partial(image_ops.decode_png, channels=3),
"gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),
}
with self.cached_session():
for path in paths:
contents = io_ops.read_file(os.path.join(prefix, path)).eval()
images = {}
for name, decode in decoders.items():
image = decode(contents).eval()
self.assertEqual(image.ndim, 3)
for prev_name, prev in images.items():
print("path %s, names %s %s, shapes %s %s" %
(path, name, prev_name, image.shape, prev.shape))
self.assertAllEqual(image, prev)
images[name] = image
def testError(self):
path = "tensorflow/core/lib/gif/testdata/scan.gif"
with self.cached_session():
for decode in image_ops.decode_jpeg, image_ops.decode_png:
with self.assertRaisesOpError(r"Got 12 frames"):
decode(io_ops.read_file(path)).eval()
class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
def testSelectFromThreeClusters(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
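    # Each row of boxes_np is a diagonal pair of box corners; the six boxes
    # form three spatial clusters (around offsets ~0, ~10 and ~100), so NMS
    # with iou_threshold 0.5 should keep the highest-scoring box per cluster:
    # indices 3, 0 and 5.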
max_output_size_np = 3
iou_threshold_np = 0.5
with self.cached_session():
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold).eval()
self.assertAllClose(selected_indices, [3, 0, 5])
def testInvalidShape(self):
# The boxes should be 2D of shape [num_boxes, 4].
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 2 but is rank 1"):
boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
with self.assertRaisesRegexp(ValueError, "Dimension must be 4 but is 3"):
boxes = constant_op.constant([[0.0, 0.0, 1.0]])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
    # The boxes are of shape [num_boxes, 4] and the scores of shape
    # [num_boxes]; here num_boxes disagrees (1 vs. 2), so an error is thrown.
with self.assertRaisesRegexp(ValueError,
"Dimensions must be equal, but are 1 and 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9, 0.75])
selected_indices = image_ops.non_max_suppression(boxes, scores, 3, 0.5)
# The scores should be 1D of shape [num_boxes].
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 1 but is rank 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([[0.9]])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
    # The max_output_size should be a scalar (0-D).
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 0 but is rank 1"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, [3], 0.5)
    # The iou_threshold should be a scalar (0-D).
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 0 but is rank 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, 3, [[0.5]])
def testDataTypes(self):
# Test case for GitHub issue 20199.
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
# Note: There are multiple versions of non_max_suppression v2, v3, v4.
# gen_image_ops.non_max_suppression_v2:
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = gen_image_ops.non_max_suppression_v2(
boxes, scores, max_output_size, iou_threshold).eval()
self.assertAllClose(selected_indices, [3, 0, 5])
# image_ops.non_max_suppression = gen_image_ops.non_max_suppression_v3.
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold).eval()
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v4.
score_threshold = float('-inf')
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices, _ = gen_image_ops.non_max_suppression_v4(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = selected_indices.eval()
self.assertAllClose(selected_indices, [3, 0, 5])
class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase):
def testSelectFromThreeClusters(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices_padded, num_valid_padded = \
image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices_padded.eval(), [3, 0, 5, 0, 0])
self.assertEqual(num_valid_padded.eval(), 3)
self.assertAllClose(selected_indices.eval(), [3, 0, 5])
self.assertEqual(num_valid.eval(), 3)
def testSelectFromContinuousOverLap(self):
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold)
    # Without pad_to_max_output_size, the output shape is not statically known.
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices.eval(), [0, 2, 4])
self.assertEqual(num_valid.eval(), 3)
class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase):
def testSelectOneFromThree(self):
overlaps_np = [
[1.0, 0.7, 0.2],
[0.7, 1.0, 0.0],
[0.2, 0.0, 1.0],
]
scores_np = [0.7, 0.9, 0.1]
    max_output_size_np = 3
overlaps = constant_op.constant(overlaps_np)
scores = constant_op.constant(scores_np)
    max_output_size = constant_op.constant(max_output_size_np)
overlap_threshold = 0.6
score_threshold = 0.4
selected_indices = image_ops.non_max_suppression_with_overlaps(
overlaps, scores, max_output_size, overlap_threshold, score_threshold)
with self.cached_session():
self.assertAllClose(selected_indices.eval(), [1])
class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):
"""Tests utility function used by ssim() and psnr()."""
def testWrongDims(self):
img = array_ops.placeholder(dtype=dtypes.float32)
img_np = np.array((2, 2))
with self.test_session(use_gpu=True) as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img: img_np})
def testShapeMismatch(self):
img1 = array_ops.placeholder(dtype=dtypes.float32)
img2 = array_ops.placeholder(dtype=dtypes.float32)
img1_np = np.array([1, 2, 2, 1])
img2_np = np.array([1, 3, 3, 1])
with self.test_session(use_gpu=True) as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(img1, img2)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img1: img1_np, img2: img2_np})
class PSNRTest(test_util.TensorFlowTestCase):
"""Tests for PSNR."""
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/psnr/testdata", filename))
im = image_ops.decode_jpeg(content, dct_method="INTEGER_ACCURATE")
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = sess.run([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.test_session(use_gpu=True) as sess:
q20 = self._LoadTestImage(sess, "cat_q20.jpg")
q72 = self._LoadTestImage(sess, "cat_q72.jpg")
q95 = self._LoadTestImage(sess, "cat_q95.jpg")
return q20, q72, q95
def _PSNR_NumPy(self, orig, target, max_value):
"""Numpy implementation of PSNR."""
mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1))
return 20 * np.log10(max_value) - 10 * np.log10(mse)
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testPSNRSingleImage(self):
image1 = self._RandomImage((8, 8, 1), 1)
image2 = self._RandomImage((8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.test_session(use_gpu=True):
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1.0, "psnr").eval()
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testPSNRMultiImage(self):
image1 = self._RandomImage((10, 8, 8, 1), 1)
image2 = self._RandomImage((10, 8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.test_session(use_gpu=True):
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1, "psnr").eval()
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testGoldenPSNR(self):
q20, q72, q95 = self._LoadTestImages()
# Verify NumPy implementation first.
# Golden values are generated using GNU Octave's psnr() function.
psnr1 = self._PSNR_NumPy(q20, q72, 1)
self.assertNear(30.321, psnr1, 0.001, msg="q20.dtype=" + str(q20.dtype))
psnr2 = self._PSNR_NumPy(q20, q95, 1)
self.assertNear(29.994, psnr2, 0.001)
psnr3 = self._PSNR_NumPy(q72, q95, 1)
self.assertNear(35.302, psnr3, 0.001)
# Test TensorFlow implementation.
with self.test_session(use_gpu=True):
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)
tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)
tf_psnr1 = image_ops.psnr(tf_q20, tf_q72, 1, "psnr1").eval()
tf_psnr2 = image_ops.psnr(tf_q20, tf_q95, 1, "psnr2").eval()
tf_psnr3 = image_ops.psnr(tf_q72, tf_q95, 1, "psnr3").eval()
self.assertAllClose(psnr1, tf_psnr1, atol=0.001)
self.assertAllClose(psnr2, tf_psnr2, atol=0.001)
self.assertAllClose(psnr3, tf_psnr3, atol=0.001)
def testInfinity(self):
q20, _, _ = self._LoadTestImages()
psnr = self._PSNR_NumPy(q20, q20, 1)
with self.test_session(use_gpu=True):
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_psnr = image_ops.psnr(tf_q20, tf_q20, 1, "psnr").eval()
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testInt(self):
img1 = self._RandomImage((10, 8, 8, 1), 255)
img2 = self._RandomImage((10, 8, 8, 1), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
psnr_uint8 = image_ops.psnr(img1, img2, 255)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
psnr_float32 = image_ops.psnr(img1, img2, 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(psnr_uint8.eval(), psnr_float32.eval(), atol=0.001)
class SSIMTest(test_util.TensorFlowTestCase):
"""Tests for SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_ssim = np.asarray([[1.000000, 0.230880, 0.231153],
[0.230880, 1.000000, 0.996828],
[0.231153, 0.996828, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = sess.run([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.test_session(use_gpu=True) as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against values produced by Matlab."""
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3)]
ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
ssim = image_ops.ssim(*ph, max_val=1.0)
with self.test_session(use_gpu=True):
scores = [ssim.eval(dict(zip(ph, t)))
for t in itertools.combinations_with_replacement(img, 2)]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testBatch(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
ssim = image_ops.ssim(constant_op.constant(img1),
constant_op.constant(img2), 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(expected, ssim.eval(), atol=1e-4)
def testBroadcast(self):
img = self._LoadTestImages()[:2]
expected = self._ssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
ssim = image_ops.ssim(img1, img2, 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(expected, ssim.eval(), atol=1e-4)
def testNegative(self):
"""Tests against negative SSIM index."""
step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)
img1 = np.tile(step, (16, 1))
img2 = np.fliplr(img1)
img1 = img1.reshape((1, 16, 16, 1))
img2 = img2.reshape((1, 16, 16, 1))
ssim = image_ops.ssim(constant_op.constant(img1),
constant_op.constant(img2), 255)
with self.test_session(use_gpu=True):
self.assertLess(ssim.eval(), 0)
def testInt(self):
img1 = self._RandomImage((1, 16, 16, 3), 255)
img2 = self._RandomImage((1, 16, 16, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim(img1, img2, 255)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim(img1, img2, 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(ssim_uint8.eval(), ssim_float32.eval(), atol=0.001)
class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
"""Tests for MS-SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_msssim = np.asarray([[1.000000, 0.091016, 0.091025],
[0.091016, 1.000000, 0.999567],
[0.091025, 0.999567, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = sess.run([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.test_session(use_gpu=True) as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against MS-SSIM computed with Matlab implementation.
For color images, MS-SSIM scores are averaged over color channels.
"""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3)]
ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
msssim = image_ops.ssim_multiscale(*ph, max_val=1.0)
with self.test_session(use_gpu=True):
scores = [msssim.eval(dict(zip(ph, t)))
for t in itertools.combinations_with_replacement(img, 2)]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testUnweightedIsDifferentiable(self):
img = self._LoadTestImages()
ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
scaled_ph = [x * scalar for x in ph]
msssim = image_ops.ssim_multiscale(*scaled_ph, max_val=1.0,
power_factors=(1, 1, 1, 1, 1))
grads = gradients.gradients(msssim, scalar)
with self.test_session(use_gpu=True) as sess:
np_grads = sess.run(grads, feed_dict={ph[0]: img[0], ph[1]: img[1]})
self.assertTrue(np.isfinite(np_grads).all())
def testBatch(self):
"""Tests MS-SSIM computed in batch."""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
msssim = image_ops.ssim_multiscale(constant_op.constant(img1),
constant_op.constant(img2), 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(expected, msssim.eval(), 1e-4)
def testBroadcast(self):
"""Tests MS-SSIM broadcasting."""
img = self._LoadTestImages()[:2]
expected = self._msssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
score_tensor = image_ops.ssim_multiscale(img1, img2, 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(expected, score_tensor.eval(), 1e-4)
def testRange(self):
"""Tests against low MS-SSIM score.
MS-SSIM is a geometric mean of SSIM and CS scores of various scales.
    If any of the values is negative, so that the geometric mean is not
    well-defined, the MS-SSIM score is treated as zero.
"""
with self.test_session(use_gpu=True) as sess:
img1 = self._LoadTestImage(sess, "checkerboard1.png")
img2 = self._LoadTestImage(sess, "checkerboard3.png")
images = [img1, img2, np.zeros_like(img1),
np.full_like(img1, fill_value=255)]
images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]
msssim_ops = [image_ops.ssim_multiscale(x, y, 1.0)
for x, y in itertools.combinations(images, 2)]
msssim = sess.run(msssim_ops)
msssim = np.squeeze(msssim)
self.assertTrue(np.all(msssim >= 0.0))
self.assertTrue(np.all(msssim <= 1.0))
def testInt(self):
img1 = self._RandomImage((1, 180, 240, 3), 255)
img2 = self._RandomImage((1, 180, 240, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim_multiscale(img1, img2, 255)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim_multiscale(img1, img2, 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(ssim_uint8.eval(), ssim_float32.eval(), atol=0.001)
class ImageGradientsTest(test_util.TensorFlowTestCase):
def testImageGradients(self):
shape = [1, 2, 4, 1]
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
img = array_ops.reshape(img, shape)
expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape)
expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape)
dy, dx = image_ops.image_gradients(img)
with self.cached_session():
actual_dy = dy.eval()
actual_dx = dx.eval()
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsMultiChannelBatch(self):
batch = [[[[1, 2], [2, 5], [3, 3]],
[[8, 4], [5, 1], [9, 8]]],
[[[5, 3], [7, 9], [1, 6]],
[[1, 2], [6, 3], [6, 3]]]]
expected_dy = [[[[7, 2], [3, -4], [6, 5]],
[[0, 0], [0, 0], [0, 0]]],
[[[-4, -1], [-1, -6], [5, -3]],
[[0, 0], [0, 0], [0, 0]]]]
expected_dx = [[[[1, 3], [1, -2], [0, 0]],
[[-3, -3], [4, 7], [0, 0]]],
[[[2, 6], [-6, -3], [0, 0]],
[[5, 1], [0, 0], [0, 0]]]]
batch = constant_op.constant(batch)
assert batch.get_shape().as_list() == [2, 2, 3, 2]
dy, dx = image_ops.image_gradients(batch)
with self.test_session(use_gpu=True):
actual_dy = dy.eval()
actual_dx = dx.eval()
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsBadShape(self):
# [2 x 4] image but missing batch and depth dimensions.
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
with self.assertRaises(ValueError):
image_ops.image_gradients(img)
class SobelEdgesTest(test_util.TensorFlowTestCase):
def testSobelEdges1x2x3x1(self):
img = constant_op.constant([[1, 3, 6], [4, 1, 5]],
dtype=dtypes.float32, shape=[1, 2, 3, 1])
expected = np.reshape([[[0, 0], [0, 12], [0, 0]],
[[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])
sobel = image_ops.sobel_edges(img)
with self.test_session(use_gpu=True):
actual_sobel = sobel.eval()
self.assertAllClose(expected, actual_sobel)
def testSobelEdges5x3x4x2(self):
batch_size = 5
plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]],
[1, 3, 4, 1])
two_channel = np.concatenate([plane, plane], axis=3)
batch = np.concatenate([two_channel] * batch_size, axis=0)
img = constant_op.constant(batch, dtype=dtypes.float32,
shape=[batch_size, 3, 4, 2])
expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]],
[[6, 0], [0, 6], [-6, 10], [-6, 0]],
[[0, 0], [0, 0], [0, 10], [0, 0]]],
[1, 3, 4, 1, 2])
expected_two_channel = np.concatenate(
[expected_plane, expected_plane], axis=3)
expected_batch = np.concatenate([expected_two_channel] * batch_size, axis=0)
sobel = image_ops.sobel_edges(img)
with self.test_session(use_gpu=True):
actual_sobel = sobel.eval()
self.assertAllClose(expected_batch, actual_sobel)
class DecodeImageTest(test_util.TensorFlowTestCase):
def testJpegUint16(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.uint16)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testPngUint16(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testGifUint16(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.uint16)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpUint16(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.uint16)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testJpegFloat32(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.float32)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testPngFloat32(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testGifFloat32(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.float32)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpFloat32(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.float32)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
if __name__ == "__main__":
googletest.main()
|
import requests
url = 'https://note.nkmk.me'
hb_count = 'http://api.b.st-hatena.com/entry.count'
r = requests.get(hb_count, params={'url': url})
print(r.url)
# http://api.b.st-hatena.com/entry.count?url=https%3A%2F%2Fnote.nkmk.me
print(r.text)
# 5
print(type(r.text))
# <class 'str'>
print(int(r.text))
# 5
print(type(int(r.text)))
# <class 'int'>
hb_counts = 'http://api.b.st-hatena.com/entry.counts'
r = requests.get(hb_counts, params={'url': ['https://www.google.co.jp', 'https://www.yahoo.co.jp']})
print(r.url)
# http://api.b.st-hatena.com/entry.counts?url=https%3A%2F%2Fwww.google.co.jp&url=https%3A%2F%2Fwww.yahoo.co.jp
j = r.json()
print(j)
# {'https://www.google.co.jp': 1385, 'https://www.yahoo.co.jp': 313}
print(type(j))
# <class 'dict'>
hb_total_count = 'http://api.b.st-hatena.com/entry.total_count'
r = requests.get(hb_total_count, params={'url': url})
print(r.url)
# http://api.b.st-hatena.com/entry.total_count?url=https%3A%2F%2Fnote.nkmk.me
j = r.json()
print(j)
# {'url': 'https://note.nkmk.me', 'total_bookmarks': 324}
print(j['total_bookmarks'])
# 324
hb_entry = 'http://b.hatena.ne.jp/entry/jsonlite/'
r = requests.get(hb_entry, params={'url': url})
print(r.url)
# http://b.hatena.ne.jp/entry/jsonlite/?url=https%3A%2F%2Fnote.nkmk.me
j = r.json()
import pprint
pprint.pprint(j)
# {'bookmarks': [{'comment': '',
# 'tags': ['*Python'],
# 'timestamp': '2018/06/02 10:52',
# 'user': 'pkdick'},
# {'comment': '',
# 'tags': [],
# 'timestamp': '2018/05/22 16:24',
# 'user': 'ilford400'},
# {'comment': '',
# 'tags': ['python'],
# 'timestamp': '2018/05/02 14:12',
# 'user': 'yem3399op'}],
# 'count': 5,
# 'eid': '356280517',
# 'entry_url': 'http://b.hatena.ne.jp/entry/s/note.nkmk.me/',
# 'screenshot': 'http://b.hatena.ne.jp/images/v4/public/common/noimage.png',
# 'title': 'nkmk note',
# 'url': 'https://note.nkmk.me/'}
print(type(j['bookmarks']))
# <class 'list'>
print(type(j['bookmarks'][0]))
# <class 'dict'>
for b in j['bookmarks']:
print(b['timestamp'])
# 2018/06/02 10:52
# 2018/05/22 16:24
# 2018/05/02 14:12
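
# A hedged follow-up sketch (not part of the original snippet): since
# entry.counts accepts several `url` parameters, a small helper can fetch the
# bookmark counts for a batch of URLs in one request. It reuses the `requests`
# import above; the function name and error handling are illustrative only.
def get_bookmark_counts(urls):
    """Return {url: bookmark_count} for the given list of URLs."""
    r = requests.get('http://api.b.st-hatena.com/entry.counts',
                     params={'url': urls})
    r.raise_for_status()  # fail early on HTTP errors
    return r.json()

# Example usage, sorted from most to least bookmarked:
# counts = get_bookmark_counts(['https://www.google.co.jp',
#                               'https://www.yahoo.co.jp'])
# for u, c in sorted(counts.items(), key=lambda x: x[1], reverse=True):
#     print(u, c)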
|
import json
import sys
import time
from twisted.internet.task import deferLater
from twisted.web import http
from twisted.web.resource import Resource
from twisted.web.server import Site
from twisted.internet import reactor
from gsmmodem.modem import GsmModem, SentSms
from gsmmodem.exceptions import TimeoutException, PinRequiredError, IncorrectPinError
from config import config
from libs.services import modem_service
from libs import modem_manager
class Sms(Resource):
isLeaf = True
def __init__(self, serviceType):
Resource.__init__(self)
self.serviceType = serviceType
def render_POST(self, request):
if self.serviceType == 'send':
print "DEBUG: Got POST a request from {}".format(request.getClientIP())
# global debugObject
# reactor.callLater(2,reactor.stop)
# debugObject = request
print "DEBUG: ",
print(request.args)
# TODO: Return JSON with status and ACK of sending message
            # TODO: Use inlineCallbacks rather than a blocking call
d = deferLater(reactor, 0, lambda: request)
d.addCallback(self._delayedRender)
request.responseHeaders.addRawHeader(b"content-type", b"application/json")
timestamp = int(time.time())
return_value = {
u'result': u'true',
u'timestamp': timestamp,
u'status': u'sent',
u'refid': u'N/A',
}
return json.dumps(return_value)
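    # A possible non-blocking variant hinted at by the TODO above (a sketch
    # only; it is not wired in and the helper name is made up):
    #
    #   from twisted.internet.threads import deferToThread
    #
    #   def _sendAsync(self, request):
    #       # run the blocking modem call in a thread pool
    #       return deferToThread(sendSms,
    #                            request.args['mobile_number'][0],
    #                            request.args['message'][0])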
def _delayedRender(self, request):
mobile_number = request.args['mobile_number'][0]
if not (self.isMobile(mobile_number)):
return "Invalid mobile number: {}\nerror code:-1".format(mobile_number)
message = request.args['message'][0]
        # TODO: find out why the class attribute is not resolved here;
        # this should read: if not Service.debugMode:
if True:
print("DEBUG: Running delayed job")
sendSms(mobile_number, message)
else:
print("[DEBUG_MODE]: Message = {} , \nmobile number = {}".format(mobile_number, message))
def isMobile(self, number):
try:
int(number)
if (len(number) != 10):
return False
return True
except ValueError:
return False
def sendSms(destination, message, deliver=False):
if deliver:
print ('\nSending SMS and waiting for delivery report...')
else:
print('\nSending SMS \nmessage ({}) \nto ({})...'.format(message, destination))
try:
modem = modem_manager.modems.get_random_modem()
sms = modem.sendSms(destination, message, waitForDeliveryReport=deliver)
except TimeoutException:
print('Failed to send message: the send operation timed out')
else:
if sms.report:
print('Message sent{0}'.format(
' and delivered OK.' if sms.status == SentSms.DELIVERED else ', but delivery failed.'))
else:
print('Message sent.')
class UnknownService(Resource):
isLeaf = True
def render(self, request):
return self.error_info(request)
def error_info(self, request):
request.responseHeaders.addRawHeader(b"content-type", b"application/json")
request.setResponseCode(http.NOT_FOUND)
return_value = {
u'result': u'false',
u'reason': u'Unknown Service',
u'request': {
u'args': request.args,
u'client': {
u'host': request.client.host,
u'port': request.client.port,
u'type': request.client.type,
},
u'code': request.code,
u'method': request.method,
u'path': request.path,
}
}
return json.dumps(return_value)
class Service(Resource):
# isLeaf = True
debugMode = False
def __init__(self, debugMode):
Resource.__init__(self)
Service.debugMode = debugMode
def getChild(self, path, request):
if path == "sms":
return Sms(request.postpath[0]) # Get the next URL component
elif path == "modem":
return modem_service.ModemService(request.postpath[0])
elif path == "ping":
return Ping()
else:
return UnknownService()
def render_GET(self, request):
request.responseHeaders.addRawHeader(b"content-type", b"application/json")
return_value = {u'result': u'ok'}
return json.dumps(return_value)
def restart(self):
pass
class Ping(Resource):
isLeaf = True
def render_GET(self, request):
request.responseHeaders.addRawHeader(b"content-type", b"application/json")
timestamp = int(time.time())
return_value = {
u'result': u'true',
u'timestamp': timestamp,
u'status': u'pong',
}
return json.dumps(return_value)
def main():
port = config.api['port']
service_name = config.api['service_name']
debug_mode = config.api['debug']
resource = Service(debug_mode)
root_web = Site(resource)
resource.putChild(service_name, Service(debug_mode))
if not debug_mode:
modem_manager.init()
print("Connected to modem")
else:
print("DEBUG_MODE enabled no message will be sent out from the dongle")
reactor.listenTCP(port, root_web)
print "Server running on {} url: localhost:{}/{}".format(port, port, service_name)
reactor.run()
if __name__ == '__main__':
main()
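
# Example client call (hedged: the actual service_name and port come from
# config.api, so the values below are placeholders):
#
#   curl -X POST "http://localhost:<port>/<service_name>/sms/send" \
#        --data "mobile_number=0123456789" --data "message=hello"
#
# The handler above answers with JSON like
# {"result": "true", "timestamp": ..., "status": "sent", "refid": "N/A"}.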
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# gingerprawn / api.univlib.jiangnan / GeoIP-like IP-to-dorm converter
# XXX This file is to be moved to backend as its functionality can be shared
#
# Copyright (C) 2011 Wang Xuerui <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os.path import realpath, split as pathsplit
from os.path import join as pathjoin
DATA_FILE = realpath(pathjoin(pathsplit(__file__)[0], 'dorms.txt'))
DATA_ENC = 'utf-8-sig'
SEGTYPE_FIELD = u'segtype'
# now the parsing of individual parts uses dynamic dispatch
SEGTYPE_UNIFORM = u'uniform'
SEGTYPE_SPECIAL = u'special'
# lambdas defer name resolution so the parser functions can be defined below
SEGTYPE_DISPATCH = {SEGTYPE_UNIFORM: lambda a, b, c: parse_uniform(a, b, c),
SEGTYPE_SPECIAL: lambda a, b, c: parse_special(a, b, c),
}
IPPREFIX_FIELD = u'ipprefix'
IP_BEGIN_FIELD = u'ipsubstart'
IP_STEP_FIELD = u'ipstep'
APARTMENT_START_FIELD = u'apartmentstart'
DATALINE_PREFIX = u'"'
# NOTE: comment support seems to be superfluous due to internal mechanism
# which automatically ignores lines w/o '=' or a dataline prefix.
# But this feature enables you to comment datalines.
LINECOMMENT_DELIM = u'--'
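
# A hedged sketch of what a data segment in dorms.txt might look like, inferred
# from the field names and parsers below (the real data file is not included
# here and these values are made up):
#
#   segtype=uniform
#   ipprefix=172.18.100
#   ipsubstart=1
#   ipstep=1
#   apartmentstart=1
#   "NorthApartment,12,M    -- dataline: name, number of apartments, gender
#
# Segments are separated by blank lines, datalines start with '"', and '--'
# introduces a line comment.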
__LOOKUP_TABLE = None
def __not_initialized_err(*args, **kwargs):
raise RuntimeError("lookup table not initialized")
lookup = __not_initialized_err
def parse_parts(parts, dic=None):
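    # Builds (or extends) a nested mapping of the form
    #   {ipprefix_tuple: {segtype: {metadata fields..., 'data': ...}}}
    # which is what _lookup() below walks.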
if dic is None:
do_return = True
dic = {}
else:
do_return = False
for part in parts:
# lines = [l for l in part.split('\n') if l]
props = dict([l.split('=', 1) for l in part if '=' in l])
data = [l[1:].split(',') for l in part if l[0] == DATALINE_PREFIX]
# common to both types of segments, ipprefix is processed outside
# the individual parser fn's.
ipprefix = tuple([int(s) for s in props[IPPREFIX_FIELD].split('.')])
segtype = props[SEGTYPE_FIELD]
try:
dic[ipprefix][segtype] = {}
except KeyError:
dic[ipprefix] = {segtype: {}, }
curdic = dic[ipprefix][segtype]
# dispatch work
try:
SEGTYPE_DISPATCH[segtype](props, data, curdic)
except KeyError:
# upwards compatibility: when encountering unknown segtype,
# just ignore it and go on
pass
if do_return:
return dic
return
def parse_uniform(props, data, dic):
subrange = int(props[IP_BEGIN_FIELD])
delta = int(props[IP_STEP_FIELD])
aptstart = int(props[APARTMENT_START_FIELD])
# store some metadata to help seeking
dic[IP_BEGIN_FIELD] = subrange
dic[IP_STEP_FIELD] = delta
dic[APARTMENT_START_FIELD] = aptstart
# placeholder not needed, since apt no. is subtracted by aptstart
# first.
dic['data'] = []
curlst = dic['data']
for apt_name, num_apts_str, gender in data:
num_apts = int(num_apts_str)
genderchar = gender[0] if gender else u'' # FIXED: str type consistency
curlst += [(apt_name, apt_no, genderchar, )
for apt_no in range(aptstart, aptstart + num_apts)]
aptstart += num_apts
def parse_special(props, data, dic):
dic['data'] = {}
curmap = dic['data']
for apt_name, apt_no, ip_start, ip_end, gender in data:
genderchar = gender[0] if gender else u'' # FIXED: str type consistency
curmap[(int(ip_start), int(ip_end), )] = (apt_name, int(apt_no),
genderchar, )
def read_in(fname):
fp = open(fname, 'rb')
raw_data = fp.read()
fp.close()
u_data = unicode(raw_data, DATA_ENC)
# added comment support
# so elaborate this a little bit
rawlines = u_data.replace('\r\n', '\n').split('\n')
comment_indices = [l.find(LINECOMMENT_DELIM) for l in rawlines]
lines = [l if cidx == -1 else l[:cidx]
for l, cidx in zip(rawlines, comment_indices)]
data_w_o_comment = '\n'.join(lines)
return [p
for p in [[l for l in part_chunk.split('\n') if l]
for part_chunk in data_w_o_comment.split('\n\n')]
if p
]
def init_lookup(fname=DATA_FILE):
global __LOOKUP_TABLE
global lookup
__LOOKUP_TABLE = parse_parts(read_in(fname))
lookup = _lookup
def _lookup(ip_tuple):
# FIXED: returns None if list is passed in
ip_tuple = tuple(ip_tuple)
for prefix in __LOOKUP_TABLE:
if ip_tuple[:len(prefix)] == prefix:
subset = __LOOKUP_TABLE[prefix]
ip_tuple = ip_tuple[len(prefix):]
break
try:
subset
except NameError:
return None
for segtype, seg in subset.items():
if segtype == SEGTYPE_UNIFORM:
# Uniform distribution, use accelerated indexing
# Pull out the metadata stored during initialization.
subrange = seg[IP_BEGIN_FIELD]
delta = seg[IP_STEP_FIELD]
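            # Index straight into the flat apartment list; e.g. with
            # ipsubstart=100 and ipstep=2 (made-up numbers) an address ending
            # in .106 maps to entry (106 - 100) / 2 == 3.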
try:
return seg['data'][(ip_tuple[0] - subrange) / delta]
except IndexError:
pass # we put off failure return in case special branch has it
elif segtype == SEGTYPE_SPECIAL:
# We need to (at present) walk through the list.
for ip_start, ip_end in seg['data']:
if ip_start <= ip_tuple[0] <= ip_end: # Hit!
return seg['data'][(ip_start, ip_end, )]
# put off failure detection
# If we arrive here, our search has certainly failed.
# Admit failure.
return None
if __name__ == '__main__':
init_lookup()
prompt = 'ip addr, empty line to end: '
while True:
# hack to allow shorter input~
m = raw_input(prompt).replace('*', '172')
if not m:
break
ip = m.split('.')
if len(ip) != 4:
print 'incorrect address format'
continue
try:
ip = tuple([int(i) for i in ip])
except ValueError:
print 'invalid char in address'
continue
if not all(0<=i<=255 for i in ip):
print 'number too large or small'
continue
result = lookup(ip)
if result is None:
print 'not found in mapping'
else:
print u'%s %d# %s' % result
# vi:ai:et:ts=4 sw=4 sts=4 fenc=utf-8 ff=unix
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2013 Noviat nv/sa (www.noviat.com). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import xlwt
from datetime import datetime
from openerp.osv import orm
from openerp.addons.report_xls.report_xls import report_xls
from openerp.addons.report_xls.utils import rowcol_to_cell, _render
from .nov_account_journal import nov_journal_print
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
class account_journal_xls_parser(nov_journal_print):
def __init__(self, cr, uid, name, context):
super(account_journal_xls_parser, self).__init__(cr, uid, name,
context=context)
journal_obj = self.pool.get('account.journal')
self.context = context
wanted_list = journal_obj._report_xls_fields(cr, uid, context)
template_changes = journal_obj._report_xls_template(cr, uid, context)
self.localcontext.update({
'datetime': datetime,
'wanted_list': wanted_list,
'template_changes': template_changes,
})
class account_journal_xls(report_xls):
def __init__(self, name, table, rml=False, parser=False, header=True,
store=False):
super(account_journal_xls, self).__init__(
name, table, rml, parser, header, store)
# Cell Styles
_xs = self.xls_styles
# header
rh_cell_format = _xs['bold'] + _xs['fill'] + _xs['borders_all']
self.rh_cell_style = xlwt.easyxf(rh_cell_format)
self.rh_cell_style_center = xlwt.easyxf(rh_cell_format + _xs['center'])
self.rh_cell_style_right = xlwt.easyxf(rh_cell_format + _xs['right'])
# lines
aml_cell_format = _xs['borders_all']
self.aml_cell_style = xlwt.easyxf(aml_cell_format)
self.aml_cell_style_center = xlwt.easyxf(
aml_cell_format + _xs['center'])
self.aml_cell_style_date = xlwt.easyxf(
aml_cell_format + _xs['left'],
num_format_str=report_xls.date_format)
self.aml_cell_style_decimal = xlwt.easyxf(
aml_cell_format + _xs['right'],
num_format_str=report_xls.decimal_format)
# totals
rt_cell_format = _xs['bold'] + _xs['fill'] + _xs['borders_all']
self.rt_cell_style = xlwt.easyxf(rt_cell_format)
self.rt_cell_style_right = xlwt.easyxf(rt_cell_format + _xs['right'])
self.rt_cell_style_decimal = xlwt.easyxf(
rt_cell_format + _xs['right'],
num_format_str=report_xls.decimal_format)
# XLS Template Journal Items
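        # Each spec entry follows the report_xls column convention used below:
        # [colspan, column width, cell type, cell data / render expression,
        #  optional formula, optional cell style].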
self.col_specs_lines_template = {
'move_name': {
'header': [1, 20, 'text', _render("_('Entry')")],
'lines':
[1, 0, 'text',
_render("l['move_name'] != '/' and l['move_name'] \
or ('*'+str(l['move_id']))")],
'totals': [1, 0, 'text', None]},
'move_date': {
'header': [1, 13, 'text', _render("_('Date')")],
'lines':
[1, 0, 'date',
_render("datetime.strptime(l['move_date'],'%Y-%m-%d')"),
None, self.aml_cell_style_date],
'totals': [1, 0, 'text', None]},
'acc_code': {
'header': [1, 12, 'text', _render("_('Account')")],
'lines': [1, 0, 'text', _render("l['acc_code']")],
'totals': [1, 0, 'text', None]},
'acc_name': {
'header': [1, 36, 'text', _render("_('Account Name')")],
'lines': [1, 0, 'text', _render("l['acc_name']")],
'totals': [1, 0, 'text', None]},
'aml_name': {
'header': [1, 42, 'text', _render("_('Description')")],
'lines': [1, 0, 'text', _render("l['aml_name']")],
'totals': [1, 0, 'text', None]},
'period': {
'header': [1, 12, 'text', _render("_('Period')")],
'lines': [1, 0, 'text', _render("l['period']")],
'totals': [1, 0, 'text', None]},
'journal': {
'header': [1, 20, 'text', _render("_('Journal')")],
'lines': [1, 0, 'text', _render("l['journal']")],
'totals': [1, 0, 'text', None]},
'journal_code': {
'header': [1, 10, 'text', _render("_('Journal')")],
'lines': [1, 0, 'text', _render("l['journal_code']")],
'totals': [1, 0, 'text', None]},
'analytic_account': {
'header': [1, 20, 'text', _render("_('Analytic Account')")],
'lines': [1, 0, 'text', _render("l['an_acc_name']")],
'totals': [1, 0, 'text', None]},
'analytic_account_code': {
'header': [1, 20, 'text', _render("_('Analytic Account')")],
'lines': [1, 0, 'text', _render("l['an_acc_code']")],
'totals': [1, 0, 'text', None]},
'partner_name': {
'header': [1, 36, 'text', _render("_('Partner')")],
'lines': [1, 0, 'text', _render("l['partner_name']")],
'totals': [1, 0, 'text', None]},
'partner_ref': {
'header': [1, 36, 'text', _render("_('Partner Reference')")],
'lines': [1, 0, 'text', _render("l['partner_ref']")],
'totals': [1, 0, 'text', None]},
'date_maturity': {
'header': [1, 13, 'text', _render("_('Maturity Date')")],
'lines':
[1, 0,
_render("l['date_maturity'] and 'date' or 'text'"),
_render(
"l['date_maturity'] and datetime.\
strptime(l['date_maturity'],'%Y-%m-%d') or None"),
None, self.aml_cell_style_date],
'totals': [1, 0, 'text', None]},
'debit': {
'header': [1, 18, 'text', _render("_('Debit')"), None,
self.rh_cell_style_right],
'lines': [1, 0, 'number', _render("l['debit']"), None,
self.aml_cell_style_decimal],
'totals': [1, 0, 'number', None, _render("debit_formula"),
self.rt_cell_style_decimal]},
'credit': {
'header': [1, 18, 'text', _render("_('Credit')"), None,
self.rh_cell_style_right],
'lines': [1, 0, 'number', _render("l['credit']"), None,
self.aml_cell_style_decimal],
'totals': [1, 0, 'number', None, _render("credit_formula"),
self.rt_cell_style_decimal]},
'balance': {
'header': [1, 18, 'text', _render("_('Balance')"), None,
self.rh_cell_style_right],
'lines': [1, 0, 'number', None, _render("bal_formula"),
self.aml_cell_style_decimal],
'totals': [1, 0, 'number', None, _render("bal_formula"),
self.rt_cell_style_decimal]},
'reconcile': {
'header': [1, 12, 'text', _render("_('Rec.')"), None,
self.rh_cell_style_center],
'lines': [1, 0, 'text', _render("l['reconcile']"), None,
self.aml_cell_style_center],
'totals': [1, 0, 'text', None]},
'reconcile_partial': {
'header': [1, 12, 'text', _render("_('Part. Rec.')"), None,
self.rh_cell_style_center],
'lines': [1, 0, 'text', _render("l['reconcile_partial']"),
None, self.aml_cell_style_center],
'totals': [1, 0, 'text', None]},
'tax_code': {
'header': [1, 6, 'text', _render("_('VAT')"), None,
self.rh_cell_style_center],
'lines': [1, 0, 'text', _render("l['tax_code']"), None,
self.aml_cell_style_center],
'totals': [1, 0, 'text', None]},
'tax_amount': {
'header': [1, 18, 'text', _render("_('VAT Amount')"), None,
self.rh_cell_style_right],
'lines': [1, 0, 'number', _render("l['tax_amount']"), None,
self.aml_cell_style_decimal],
'totals': [1, 0, 'text', None]},
'amount_currency': {
'header': [1, 18, 'text', _render("_('Am. Currency')"), None,
self.rh_cell_style_right],
'lines':
[1, 0,
_render("l['amount_currency'] and 'number' or 'text'"),
_render("l['amount_currency'] or None"),
None, self.aml_cell_style_decimal],
'totals': [1, 0, 'text', None]},
'currency_name': {
'header': [1, 6, 'text', _render("_('Curr.')"), None,
self.rh_cell_style_center],
'lines': [1, 0, 'text', _render("l['currency_name']"), None,
self.aml_cell_style_center],
'totals': [1, 0, 'text', None]},
'docname': {
'header': [1, 35, 'text', _render("_('Document')")],
'lines': [1, 0, 'text', _render("l['docname']")],
'totals': [1, 0, 'text', None]},
'move_ref': {
'header': [1, 25, 'text', _render("_('Entry Reference')")],
'lines': [1, 0, 'text', _render("l['move_ref']")],
'totals': [1, 0, 'text', None]},
'move_id': {
'header': [1, 10, 'text', _render("_('Entry Id')")],
'lines': [1, 0, 'text', _render("str(l['move_id'])")],
'totals': [1, 0, 'text', None]},
}
# XLS Template VAT Summary
self.col_specs_vat_summary_template = {
'tax_case_name': {
'header': [1, 45, 'text', _render("_('Description')")],
'tax_totals': [1, 0, 'text', _render("t.name")]},
'tax_code': {
'header': [1, 6, 'text', _render("_('Case')")],
'tax_totals': [1, 0, 'text', _render("t.code")]},
'tax_amount': {
'header': [1, 18, 'text', _render("_('Amount')"), None,
self.rh_cell_style_right],
'tax_totals': [1, 0, 'number', _render("sum_vat(o,t)"), None,
self.aml_cell_style_decimal]},
}
def _journal_title(self, o, ws, _p, row_pos, xlwt, _xs):
cell_style = xlwt.easyxf(_xs['xls_title'])
report_name = (10 * ' ').join([
_p.company.name,
_p.title(o)[0],
_p.title(o)[1],
_p._("Journal Overview") + ' - ' + _p.company.currency_id.name,
])
c_specs = [
('report_name', 1, 0, 'text', report_name),
]
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=cell_style)
return row_pos + 1
def _journal_lines(self, o, ws, _p, row_pos, xlwt, _xs):
wanted_list = self.wanted_list
debit_pos = self.debit_pos
credit_pos = self.credit_pos
# Column headers
c_specs = map(lambda x: self.render(
x, self.col_specs_lines_template, 'header',
render_space={'_': _p._}), wanted_list)
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=self.rh_cell_style,
set_column_size=True)
ws.set_horz_split_pos(row_pos)
# account move lines
aml_start_pos = row_pos
aml_cnt = len(_p.lines(o))
cnt = 0
for l in _p.lines(o):
cnt += 1
debit_cell = rowcol_to_cell(row_pos, debit_pos)
credit_cell = rowcol_to_cell(row_pos, credit_pos)
bal_formula = debit_cell + '-' + credit_cell
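            # bal_formula (and the debit/credit formulas further down) are
            # only consumed inside the _render() expressions of the column
            # specs; the 'dummy call' debug lines keep them referenced,
            # presumably to silence unused-variable linters.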
_logger.debug('dummy call - %s', bal_formula)
c_specs = map(
lambda x: self.render(x, self.col_specs_lines_template,
'lines'), wanted_list)
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=self.aml_cell_style)
if l['draw_line'] and cnt != aml_cnt:
row_pos += 1
# Totals
debit_start = rowcol_to_cell(aml_start_pos, debit_pos)
debit_stop = rowcol_to_cell(row_pos - 1, debit_pos)
debit_formula = 'SUM(%s:%s)' % (debit_start, debit_stop)
_logger.debug('dummy call - %s', debit_formula)
credit_start = rowcol_to_cell(aml_start_pos, credit_pos)
credit_stop = rowcol_to_cell(row_pos - 1, credit_pos)
credit_formula = 'SUM(%s:%s)' % (credit_start, credit_stop)
_logger.debug('dummy call - %s', credit_formula)
debit_cell = rowcol_to_cell(row_pos, debit_pos)
credit_cell = rowcol_to_cell(row_pos, credit_pos)
bal_formula = debit_cell + '-' + credit_cell
c_specs = map(lambda x: self.render(
x, self.col_specs_lines_template, 'totals'), wanted_list)
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=self.rt_cell_style_right)
return row_pos + 1
def _journal_vat_summary(self, o, ws, _p, row_pos, xlwt, _xs):
if not _p.tax_codes(o):
return row_pos
title_cell_style = xlwt.easyxf(_xs['bold'])
c_specs = [('summary_title', 1, 0, 'text', _p._("VAT Declaration"))]
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=title_cell_style) + 1
wanted_list = self.wanted_list
vat_summary_wanted_list = ['tax_case_name', 'tax_code', 'tax_amount']
# calculate col_span
cols_number = len(wanted_list)
vat_summary_cols_number = len(vat_summary_wanted_list)
if vat_summary_cols_number > cols_number:
raise orm.except_orm(
_('Programming Error!'),
_("vat_summary_cols_number should be < cols_number !"))
index = 0
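        # Stretch each VAT summary column over as many journal-line columns
        # as needed (col_span) so the summary table lines up with the column
        # widths written above.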
for i in range(vat_summary_cols_number):
col = vat_summary_wanted_list[i]
col_size = self.col_specs_lines_template[
wanted_list[index]]['header'][1]
templ_col_size = self.col_specs_vat_summary_template[
col]['header'][1]
# _logger.warn("col=%s, col_size=%s, templ_col_size=%s",
# col, col_size, templ_col_size)
col_span = 1
if templ_col_size > col_size:
new_size = col_size
while templ_col_size > new_size:
col_span += 1
index += 1
new_size += self.col_specs_lines_template[
wanted_list[index]]['header'][1]
self.col_specs_vat_summary_template[col]['header'][0] = col_span
self.col_specs_vat_summary_template[
col]['tax_totals'][0] = col_span
index += 1
c_specs = map(lambda x: self.render(
x, self.col_specs_vat_summary_template, 'header'),
vat_summary_wanted_list)
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=self.rh_cell_style)
for t in _p.tax_codes(o):
c_specs = map(lambda x: self.render(
x, self.col_specs_vat_summary_template, 'tax_totals'),
vat_summary_wanted_list)
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=self.aml_cell_style)
return row_pos
def generate_xls_report(self, _p, _xs, data, objects, wb):
wanted_list = _p.wanted_list
if _p.display_currency:
wanted_list += ['amount_currency', 'currency_name']
self.wanted_list = wanted_list
self.col_specs_lines_template.update(_p.template_changes)
self.debit_pos = 'debit' in wanted_list and wanted_list.index('debit')
self.credit_pos = 'credit' in wanted_list and wanted_list.index(
'credit')
        if 'balance' in wanted_list and (
                'debit' not in wanted_list or 'credit' not in wanted_list):
raise orm.except_orm(_('Customisation Error!'),
_("The 'Balance' field is a calculated XLS \
field requiring the presence of the \
'Debit' and 'Credit' fields !"))
for o in objects:
            sheet_name = ' - '.join(
                [o[1].code, o[0].code])[:31].replace('/', '-')
ws = wb.add_sheet(sheet_name)
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0 # Landscape
ws.fit_width_to_pages = 1
row_pos = 0
# set print header/footer
ws.header_str = self.xls_headers['standard']
ws.footer_str = self.xls_footers['standard']
# Data
row_pos = self._journal_title(o, ws, _p, row_pos, xlwt, _xs)
row_pos = self._journal_lines(o, ws, _p, row_pos, xlwt, _xs)
row_pos = self._journal_vat_summary(o, ws, _p, row_pos, xlwt, _xs)
account_journal_xls('report.nov.account.journal.xls', 'account.journal.period',
parser=account_journal_xls_parser)
|
"""
=======================================
FDR correction on T-test on sensor data
=======================================
One tests if the evoked response significantly deviates from 0.
Multiple comparison problem is addressed with
False Discovery Rate (FDR) correction.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.stats import bonferroni_correction, fdr_correction
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)[:30]
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
X = epochs.get_data() # as 3D matrix
X = X[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
T, pval = stats.ttest_1samp(X, 0)
alpha = 0.05
n_samples, n_tests = X.shape
threshold_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha=alpha)
threshold_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1)
reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep')
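# The effective FDR threshold is the smallest |T| among the tests that
# survive the FDR correction.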
threshold_fdr = np.min(np.abs(T)[reject_fdr])
###############################################################################
# Plot
times = 1e3 * epochs.times
plt.close('all')
plt.plot(times, T, 'k', label='T-stat')
xmin, xmax = plt.xlim()
plt.hlines(threshold_uncorrected, xmin, xmax, linestyle='--', colors='k',
label='p=0.05 (uncorrected)', linewidth=2)
plt.hlines(threshold_bonferroni, xmin, xmax, linestyle='--', colors='r',
label='p=0.05 (Bonferroni)', linewidth=2)
plt.hlines(threshold_fdr, xmin, xmax, linestyle='--', colors='b',
label='p=0.05 (FDR)', linewidth=2)
plt.legend()
plt.xlabel("Time (ms)")
plt.ylabel("T-stat")
plt.show()
|
# -*- coding: utf-8 -*-
import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.db import models
from django.db.models import Avg
from django.db.models import Q
from django.utils.translation import ugettext as _
from ordered_model.models import OrderedModel
from tinymce.models import HTMLField
from .managers import AssignmentManager, DictationManager, EnrolledManager
from simple_classroom.apps.downloads import STORAGE
class StudentProfile(models.Model):
user = models.OneToOneField(User)
cx = models.CharField(max_length=8, null=False, blank=False)
telephone = models.CharField(max_length=16, null=True, blank=True)
class Meta:
verbose_name = _(u'Estudiante')
verbose_name_plural = _(u'Estudiantes')
ordering = ['user__last_name']
def __unicode__(self):
return u'{}, {}'.format(
self.user.last_name.title(),
self.user.first_name.title())
@staticmethod
def autocomplete_search_fields():
return (
"user__last_name__icontains",
"user__first_name__icontains",)
class Subject(models.Model):
site = models.OneToOneField(Site)
code = models.CharField(_(u'Código'), max_length=8, null=False, blank=False)
name = models.CharField(_(u'Nombre'), max_length=30, null=False, blank=False)
short_name = models.CharField(_(u'Nombre corto'), max_length=15, null=True, blank=True)
class Meta:
verbose_name = _(u'Materia')
verbose_name_plural = _(u'Materias')
def __unicode__(self):
return u'{}'.format(self.name)
class Dictation(models.Model):
SEMESTER_CHOICES = (
(1, _(u'Primero')),
(2, _(u'Segundo')),
)
subject = models.ForeignKey(Subject)
date_from = models.DateField(_('Desde'), null=True, blank=True)
date_to = models.DateField(_('Hasta'), null=True, blank=True)
semester = models.IntegerField(_('Semestre'), choices=SEMESTER_CHOICES, default=1,
null=False, blank=False)
year = models.IntegerField(_(u'Año'), null=False, blank=False)
is_registration_open = models.BooleanField(_(u'Registración abierta'), default=True,
null=False, blank=False)
dictated_practice_hours = models.PositiveIntegerField(_(u'Horas dictadas de práctica'),
default=0, null=False, blank=False)
dictated_theory_hours = models.PositiveIntegerField(_(u'Horas dictadas de teoría'),
default=0, null=False, blank=False)
last_modification_date = models.DateTimeField(_(u'Fecha de última modificación'),
null=True, blank=True)
objects = DictationManager()
class Meta:
verbose_name = _(u'Dictado')
verbose_name_plural = _(u'Dictados')
ordering = ['-year', 'subject']
def __unicode__(self):
return u'{0} {1}'.format(self.subject, self.year)
def get_total_dictated_hours(self):
return self.dictated_practice_hours + self.dictated_theory_hours
def save(self, *args, **kwargs):
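        # Stamp last_modification_date only when the dictated hours change.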
if self.pk is not None:
orig = Dictation.objects.get(pk=self.pk)
if (orig.dictated_practice_hours != self.dictated_practice_hours
or orig.dictated_theory_hours != self.dictated_theory_hours):
self.last_modification_date = datetime.datetime.now()
super(Dictation, self).save(*args, **kwargs)
class TeacherProfile(OrderedModel):
abstract = HTMLField(null=True, blank=True)
avatar = models.ImageField(_(u'Avatar'), upload_to='avatar', storage=STORAGE,
null=True, blank=True)
dictation = models.ManyToManyField(Dictation, verbose_name=_(u'Dictado'))
user = models.OneToOneField(User, verbose_name=_(u'Usuario'))
class Meta(OrderedModel.Meta):
verbose_name = _(u'Profesor')
verbose_name_plural = _(u'Profesores')
def __init__(self, *args, **kwargs):
super(TeacherProfile, self).__init__(*args, **kwargs)
try:
self.default_image = getattr(self, 'avatar', None)
except KeyError:
pass
def __unicode__(self):
return u'{0}'.format(self.user.get_full_name())
class Enrolled(models.Model):
student_profile = models.ForeignKey(StudentProfile, null=False, blank=False)
dictation = models.ForeignKey(Dictation, null=False, blank=False)
date = models.DateField()
previous_attempts = models.IntegerField(default=0)
objects = EnrolledManager()
class Meta:
verbose_name = _(u'Inscripto')
verbose_name_plural = _(u'Inscriptos')
unique_together = (("student_profile", "dictation"),)
def __unicode__(self):
return u'{0}'.format(self.student_profile)
@staticmethod
def autocomplete_search_fields():
return (
"student_profile__cx__icontains",
"student_profile__user__last_name__icontains",
"student_profile__user__first_name__icontains",)
class Assignment(OrderedModel):
FINAL = _(u'Final')
LABORATORY = _(u'Laboratorio')
MIDTERM = _(u'Parcial')
EXERCISE = _(u'Práctico')
QUIZZ = _(u'Quizz')
ASSIGNMENT_TYPES = (
(1, FINAL),
(2, LABORATORY),
(3, MIDTERM),
(4, EXERCISE),
(5, QUIZZ),
)
dictation = models.ForeignKey(Dictation)
title = models.CharField(_(u'Título'), max_length=255, blank=False, null=False)
description = models.TextField(_(u'Descripción'), blank=True, null=True)
is_published = models.BooleanField(
_(u'Publicado'), blank=False, null=False, default=False,
help_text=_(u'Tildar para mostrar la asignación a los inscriptos en el dictado seleccionado.'))
publication_date = models.DateTimeField(_(u'Fecha de publicación'), blank=True, null=True)
is_evaluated = models.BooleanField(
_(u'Evaluado'), blank=False, null=False, default=False,
help_text=_(u'Tildar para indicar que la evaluación ya fue tomada y está disponible.'))
evaluation_date = models.DateTimeField(_(u'Fecha de evaluación'), blank=True, null=True)
is_scored = models.BooleanField(
_(u'Corregido'), blank=False, null=False, default=False,
help_text=_(u'Tildar para indicar que la evaluación ya fue corregida y las notas están disponibles.'))
score_date = models.DateTimeField(_(u'Fecha de Notas'), blank=True, null=True)
assignment_type = models.IntegerField(_('Tipo'), choices=ASSIGNMENT_TYPES, default=4,
null=False, blank=False)
objects = AssignmentManager()
class Meta(OrderedModel.Meta):
verbose_name = _(u'Asignación')
verbose_name_plural = _(u'Asignaciones')
ordering = ['-dictation__year', 'title']
def __unicode__(self):
return u'{} ({})'.format(self.title, self.dictation.year)
@staticmethod
def autocomplete_search_fields():
return ("title__icontains", "dictation__year__icontains", )
def save(self, *args, **kwargs):
        cache.clear()  # invalidates the cache. TODO: replace with targeted invalidation.
if self.pk is not None:
orig = Assignment.objects.get(pk=self.pk)
if orig.is_published != self.is_published and self.is_published is True:
# TODO: trigger signal, create news
self.publication_date = datetime.datetime.now()
if orig.is_evaluated != self.is_evaluated and self.is_evaluated is True:
# TODO: trigger signal, create news
self.evaluation_date = datetime.datetime.now()
if orig.is_scored != self.is_scored and self.is_scored is True:
# TODO: trigger signal, create news
self.score_date = datetime.datetime.now()
super(Assignment, self).save(*args, **kwargs)
def get_previous_assignments(self):
''' Returns the assignments for the last 2 previous dictations. '''
return Assignment.objects.filter(
is_published=True,
assignment_type=self.assignment_type,
title=self.title,
dictation__in=Dictation.objects.filter(~Q(pk=self.dictation.pk), subject=self.dictation.subject)).order_by('-dictation__year')[:2]
def get_default_download(self):
''' Return the default download, you can set the title you want in the settings file. '''
try:
return self.download_set.get(title=getattr(settings, 'ASSIGNMENT_DEFAULT_DOWNLOAD', 'default'))
        except Exception:
return None
def get_resources_for_download(self):
return self.download_set.filter(~Q(title=getattr(settings, 'ASSIGNMENT_DEFAULT_DOWNLOAD', 'default')))
class Score(models.Model):
assignment = models.ForeignKey(Assignment)
enrolled = models.ForeignKey(Enrolled)
date = models.DateTimeField(blank=False, null=False)
value = models.IntegerField(blank=False, null=False)
comment = models.CharField(max_length=255, blank=True, null=True)
class Meta:
verbose_name = _(u'Nota')
verbose_name_plural = _(u'Notas')
def __unicode__(self):
return u'{0}-{1}-{2}'.format(self.enrolled.student_profile.user.get_full_name(), self.assignment, self.value)
    def get_average(self):
        average = self.assignment.score_set.exclude(
            value=-1).aggregate(Avg('value'))['value__avg']
        # aggregate() returns None when there are no scores yet.
        return '%.2f' % average if average is not None else ''
|
'''
flaskext.bcrypt
---------------
    A Flask extension providing bcrypt hashing and comparison facilities.
:copyright: (c) 2011 by Max Countryman.
:license: BSD, see LICENSE for more details.
'''
from __future__ import absolute_import
from __future__ import print_function
__version_info__ = ('0', '7', '1')
__version__ = '.'.join(__version_info__)
__author__ = 'Max Countryman'
__license__ = 'BSD'
__copyright__ = '(c) 2011 by Max Countryman'
__all__ = ['Bcrypt', 'check_password_hash', 'generate_password_hash']
from werkzeug.security import safe_str_cmp
try:
import bcrypt
except ImportError as e:
print('bcrypt is required to use Flask-Bcrypt')
raise e
from sys import version_info
PY3 = version_info[0] >= 3
def generate_password_hash(password, rounds=None):
'''This helper function wraps the eponymous method of :class:`Bcrypt`. It
is intended to be used as a helper function at the expense of the
configuration variable provided when passing back the app object. In other
words this shortcut does not make use of the app object at all.
    To use this function, simply import it from the module and use it in a
similar fashion as the method would be used. Here is a quick example::
from flask.ext.bcrypt import generate_password_hash
pw_hash = generate_password_hash('hunter2', 10)
:param password: The password to be hashed.
:param rounds: The optional number of rounds.
'''
return Bcrypt().generate_password_hash(password, rounds)
def check_password_hash(pw_hash, password):
    '''This helper function wraps the eponymous method of :class:`Bcrypt`. It
is intended to be used as a helper function at the expense of the
configuration variable provided when passing back the app object. In other
words this shortcut does not make use of the app object at all.
    To use this function, simply import it from the module and use it in a
similar fashion as the method would be used. Here is a quick example::
from flask.ext.bcrypt import check_password_hash
check_password_hash(pw_hash, 'hunter2') # returns True
:param pw_hash: The hash to be compared against.
:param password: The password to compare.
'''
return Bcrypt().check_password_hash(pw_hash, password)
class Bcrypt(object):
'''Bcrypt class container for password hashing and checking logic using
    bcrypt, of course. This class may be used to initialize your Flask app
object. The purpose is to provide a simple interface for overriding
Werkzeug's built-in password hashing utilities.
    Although such methods are not actually overridden, the API is intentionally
made similar so that existing applications which make use of the previous
hashing functions might be easily adapted to the stronger facility of
bcrypt.
To get started you will wrap your application's app object something like
this::
app = Flask(__name__)
bcrypt = Bcrypt(app)
Now the two primary utility methods are exposed via this object, `bcrypt`.
So in the context of the application, important data, such as passwords,
could be hashed using this syntax::
password = 'hunter2'
pw_hash = bcrypt.generate_password_hash(password)
    Once hashed, the value is irreversible. However, in the case of validating
    logins, all that is required is to hash the candidate password and compare
    it to the stored hash.
Importantly a comparison should be done in constant time. This helps
prevent timing attacks. A simple utility method is provided for this::
candidate = 'secret'
bcrypt.check_password_hash(pw_hash, candidate)
If both the candidate and the existing password hash are a match
`check_password_hash` returns True. Otherwise, it returns False.
.. admonition:: Namespacing Issues
It's worth noting that if you use the format, `bcrypt = Bcrypt(app)`
you are effectively overriding the bcrypt module. Though it's unlikely
you would need to access the module outside of the scope of the
        extension, be aware that it's overridden.
Alternatively consider using a different name, such as `flask_bcrypt
= Bcrypt(app)` to prevent naming collisions.
Additionally a configuration value for `BCRYPT_LOG_ROUNDS` may be set in
the configuration of the Flask app. If none is provided this will
internally be assigned to 12. (This value is used in determining the
    complexity of the hashing; see bcrypt for more details.)
:param app: The Flask application object. Defaults to None.
'''
_log_rounds = 12
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
        '''Initializes the application with the extension.
:param app: The Flask application object.
'''
self._log_rounds = app.config.get('BCRYPT_LOG_ROUNDS', 12)
def generate_password_hash(self, password, rounds=None):
'''Generates a password hash using bcrypt. Specifying `rounds`
sets the log_rounds parameter of `bcrypt.gensalt()` which determines
the complexity of the salt. 12 is the default value.
Example usage of :class:`generate_password_hash` might look something
like this::
pw_hash = bcrypt.generate_password_hash('secret', 10)
:param password: The password to be hashed.
:param rounds: The optional number of rounds.
'''
if not password:
raise ValueError('Password must be non-empty.')
if rounds is None:
rounds = self._log_rounds
# Python 3 unicode strings must be encoded as bytes before hashing.
if PY3 and isinstance(password, str):
password = bytes(password, 'utf-8')
if not PY3 and isinstance(password, unicode):
password = password.encode('utf-8')
return bcrypt.hashpw(password, bcrypt.gensalt(rounds))
def check_password_hash(self, pw_hash, password):
'''Tests a password hash against a candidate password. The candidate
password is first hashed and then subsequently compared in constant
time to the existing hash. This will either return `True` or `False`.
Example usage of :class:`check_password_hash` would look something
like this::
pw_hash = bcrypt.generate_password_hash('secret', 10)
bcrypt.check_password_hash(pw_hash, 'secret') # returns True
:param pw_hash: The hash to be compared against.
:param password: The password to compare.
'''
# Python 3 unicode strings must be encoded as bytes before hashing.
if PY3 and isinstance(pw_hash, str):
pw_hash = bytes(pw_hash, 'utf-8')
if PY3 and isinstance(password, str):
password = bytes(password, 'utf-8')
if not PY3 and isinstance(pw_hash, unicode):
pw_hash = pw_hash.encode('utf-8')
if not PY3 and isinstance(password, unicode):
password = password.encode('utf-8')
return safe_str_cmp(bcrypt.hashpw(password, pw_hash), pw_hash)
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for `set_n_cpu_devices` from `fake.py`.
This test is isolated to ensure hermeticity because its execution changes
XLA backend configuration.
"""
import unittest
from absl.testing import absltest
from chex._src import asserts
from chex._src import fake
class DevicesSetterTest(absltest.TestCase):
def test_set_n_cpu_devices(self):
try:
# Should not initialize backends.
fake.set_n_cpu_devices(4)
except RuntimeError as set_cpu_exception:
raise unittest.SkipTest(
"set_n_cpu_devices: backend's already been initialized. "
'Run this test in isolation from others.') from set_cpu_exception
# Hence, this one does not fail.
fake.set_n_cpu_devices(6)
# This assert initializes backends.
asserts.assert_devices_available(6, 'cpu', backend='cpu')
# Which means that next call must fail.
with self.assertRaisesRegex(RuntimeError,
'Attempted to set 8 devices, but 6 CPUs.+'):
fake.set_n_cpu_devices(8)
if __name__ == '__main__':
absltest.main()
|
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings as s
from django.utils.translation import ugettext_lazy as _
class userProfile(models.Model):
user = models.ForeignKey(User, unique=True)
maxDoms = models.IntegerField(default=s.MAX_FREE)
global_records_per_zone = models.IntegerField(default=1000)
phone = models.CharField(max_length=192, blank=True)
alternate_email = models.EmailField(blank=True)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
prefered_language = models.CharField(max_length=10, choices=s.LANGUAGES, blank=True, null=True)
public_profile_field = models.BooleanField(default=False)
has_api = models.BooleanField(default=True)
class Meta:
db_table = u'auth_userprofile'
class DnsRecords(models.Model):
zone = models.CharField(max_length=192, blank=True, db_index=True)
host = models.CharField(max_length=192, blank=True, db_index=True)
type = models.CharField(max_length=24, blank=True, default='3600', db_index=True)
data = models.TextField(blank=True)
ttl = models.IntegerField(default=3600)
priority = models.IntegerField(null=True, blank=True)
refresh = models.IntegerField(default=3600)
retry = models.IntegerField(default=3600)
expire = models.IntegerField(default=86400)
minimum = models.IntegerField(default=3600)
serial = models.IntegerField(default=2008082700)
resp_person = models.CharField(max_length=192, default='resp.person.email')
primary_ns = models.CharField(max_length=192, default='ns1.yourdns.here')
data_count = models.IntegerField(default=0)
class Meta:
db_table = u'dns_records'
class suffixes(models.Model):
name = models.CharField(max_length=64, blank=False, null=False)
class dnsZones(models.Model):
zone_name = models.CharField(max_length=192, blank=False, null=False, unique=True)
add_date = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_update = models.DateTimeField(auto_now=True)
owner = models.ForeignKey(User)
class Meta:
db_table = u'Zones'
def __unicode__(self):
return u'%s' % (self.zone_name)
class zoneMeta(models.Model):
zone_name = models.ForeignKey(dnsZones)
max_records = models.IntegerField(default=1000)
class Meta:
db_table = u'dns_zonemeta'
class ServiceTemplates(models.Model):
owner = models.ForeignKey(User, null=True, blank=False)
name = models.CharField(max_length=192, blank=False, null=False)
data = models.TextField(null=False, blank=False)
class Meta:
db_table = u'services_servicetemplates'
class ZoneServices(models.Model):
zone_name = models.ForeignKey(dnsZones, null=False, blank=False)
template = models.ForeignKey(ServiceTemplates, null=False, blank=False)
record_ids = models.TextField(null=False, blank=False)
class Meta:
unique_together = (("zone_name", "template"),)
db_table = u'services_zoneservices'
class ZoneShares(models.Model):
zone = models.ForeignKey(dnsZones)
user = models.ForeignKey(User)
class Meta:
unique_together = (("zone", "user"),)
|
# -*- coding: iso-8859-1 -*-
# Yum Exteder (yumex) - A graphic package management tool
# Copyright (C) 2013 Tim Lauridsen < timlau<AT>fedoraproject<DOT>org >
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to
# the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import configparser
import gettext
import locale
import logging
import os.path
import re
import subprocess
import sys
import time
import dnfdaemon.client
from gi.repository import Gdk, Gtk, Notify
import yumex.config as config
LOCALE_DIR = os.path.join(sys.prefix, 'share', 'locale')
locale.setlocale(locale.LC_ALL, '')
locale.bindtextdomain('yumex-dnf', LOCALE_DIR)
gettext.bindtextdomain('yumex-dnf', LOCALE_DIR)
gettext.textdomain('yumex-dnf')
_ = gettext.gettext
ngettext = gettext.ngettext
logger = logging.getLogger('yumex.misc')
class QueueEmptyError(Exception):
def __init__(self):
super(QueueEmptyError, self).__init__()
class TransactionBuildError(Exception):
def __init__(self, msgs):
super(TransactionBuildError, self).__init__()
self.msgs = msgs
class TransactionSolveError(Exception):
def __init__(self, msgs):
super(TransactionSolveError, self).__init__()
self.msgs = msgs
def dbus_dnfsystem(cmd):
subprocess.call(
'/usr/bin/dbus-send --system --print-reply '
'--dest=org.baseurl.DnfSystem / org.baseurl.DnfSystem.%s' % cmd,
shell=True)
def to_pkg_tuple(pkg_id):
"""Find the real package nevre & repoid from an package pkg_id"""
(n, e, v, r, a, repo_id) = str(pkg_id).split(',')
return n, e, v, r, a, repo_id
def list_to_string(pkg_list, first_delimitier, delimiter):
"""Creates a multiline string from a list of packages"""
string = first_delimitier
for pkg_name in pkg_list:
string = string + pkg_name + delimiter
return string
def pkg_id_to_full_name(pkg_id):
(n, e, v, r, a, repo_id) = to_pkg_tuple(pkg_id)
if e and e != '0':
return "%s-%s:%s-%s.%s" % (n, e, v, r, a)
else:
return "%s-%s-%s.%s" % (n, v, r, a)
def color_floats(spec):
rgba = Gdk.RGBA()
rgba.parse(spec)
return rgba.red, rgba.green, rgba.blue
def get_color(spec):
rgba = Gdk.RGBA()
rgba.parse(spec)
return rgba
def rgb_to_hex(r, g, b):
if isinstance(r, float):
r *= 255
g *= 255
b *= 255
return "#{0:02X}{1:02X}{2:02X}".format(int(r), int(g), int(b))
def color_to_hex(color):
return rgb_to_hex(color.red, color.green, color.blue)
def is_url(url):
urls = re.findall(
r'^http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+~]|'
r'[!*(),]|%[0-9a-fA-F][0-9a-fA-F])+', url)
return urls
def format_block(block, indent):
""" Format a block of text so they get the same indentation"""
spaces = " " * indent
lines = str(block).split('\n')
result = lines[0] + "\n"
for line in lines[1:]:
result += spaces + line + '\n'
return result
def get_style_color(widget):
"""Get the default color for a widget in current theme."""
context = widget.get_style_context()
context.save()
context.set_state(Gtk.StateFlags.NORMAL)
color = context.get_color(context.get_state())
context.restore()
return color
def doGtkEvents():
"""
"""
while Gtk.events_pending(): # process Gtk events
Gtk.main_iteration()
def ExceptionHandler(func):
"""
    This decorator catches dnfdaemon backend exceptions
"""
def newFunc(*args, **kwargs):
try:
rc = func(*args, **kwargs)
return rc
except dnfdaemon.client.DaemonError as e:
base = args[0] # get current class
base.exception_handler(e)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
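# Usage sketch (illustrative only): decorate methods of a class that defines
# an `exception_handler(exc)` method, e.g.
#
#     @ExceptionHandler
#     def do_refresh(self):        # hypothetical method
#         self.backend.do_call()   # hypothetical call that may raise DaemonError
#
# Any dnfdaemon.client.DaemonError raised inside the wrapped method is passed
# to self.exception_handler() instead of propagating.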
def TimeFunction(func):
"""
    This decorator shows the execution time of a function in the debug log
"""
def newFunc(*args, **kwargs):
t_start = time.time()
rc = func(*args, **kwargs)
t_end = time.time()
name = func.__name__
logger.debug("%s took %.2f sec", name, t_end - t_start)
return rc
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
def format_number(number, SI=0, space=' '):
"""Turn numbers into human-readable metric-like numbers"""
symbols = ['', # (none)
'k', # kilo
'M', # mega
'G', # giga
'T', # tera
'P', # peta
'E', # exa
'Z', # zetta
'Y'] # yotta
if SI:
step = 1000.0
else:
step = 1024.0
thresh = 999
depth = 0
max_depth = len(symbols) - 1
# we want numbers between 0 and thresh, but don't exceed the length
# of our list. In that event, the formatting will be screwed up,
# but it'll still show the right number.
while number > thresh and depth < max_depth:
depth = depth + 1
number = number / step
if isinstance(number, int):
# it's an int or a long, which means it didn't get divided,
# which means it's already short enough
fmt = '%i%s%s'
elif number < 9.95:
# must use 9.95 for proper sizing. For example, 9.99 will be
# rounded to 10.0 with the .1f fmt string (which is too long)
fmt = '%.1f%s%s'
else:
fmt = '%.0f%s%s'
return fmt % (float(number or 0), space, symbols[depth])
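# Examples (binary 1024 steps by default, SI=1 switches to 1000 steps):
#   format_number(2048)       -> '2.0 k'
#   format_number(1000, SI=1) -> '1.0 k'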
def notify(summary, body):
Notify.init('Yum Extender')
icon = "yumex-dnf"
notification = Notify.Notification.new(summary, body, icon)
notification.set_timeout(5000) # timeout 5s
notification.show()
def check_dark_theme():
"""Returns True if Gtk using a dark theme"""
gtk_settings = Gtk.Settings.get_default()
return gtk_settings.get_property("gtk-application-prefer-dark-theme")
def logger_setup(logroot='yumex',
logfmt='%(asctime)s: %(message)s',
loglvl=logging.INFO):
"""Setup Python logging."""
logger = logging.getLogger(logroot)
logger.setLevel(loglvl)
formatter = logging.Formatter(logfmt, '%H:%M:%S')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.propagate = False
logger.addHandler(handler)
def is_gnome():
"""Return True if desktop is Gnome."""
return os.environ.get("XDG_CURRENT_DESKTOP") == "GNOME"
class YumexConf(config.BaseConfig):
""" Yum Extender Config Setting"""
debug = config.BoolOption(False)
autostart = config.BoolOption(False)
theme = config.Option("System-Dark.theme")
use_dark = config.BoolOption(False)
color_install = config.Option('#8BE8FD')
color_update = config.Option('#FF79C6')
color_downgrade = config.Option('#50FA7B')
color_normal = config.Option('#D3DAE3')
color_obsolete = config.Option('#FFB86C')
history_days = config.IntOption(180)
newest_only = config.BoolOption(True)
clean_unused = config.BoolOption(False)
update_interval = config.IntOption(60)
autocheck_updates = config.BoolOption(False)
system_refresh = config.Option('2000-01-01 00:01')
refresh_interval = config.IntOption(12)
# headerbar is default if running gnome
hb_default = is_gnome()
headerbar = config.BoolOption(hb_default)
search_default = config.CaselessSelectionOption(
default='prefix',
allowed=('prefix', 'keyword', 'fields', 'key'))
search_fields = config.KeyListOption(['name', 'summary'])
win_height = config.IntOption(700)
win_width = config.IntOption(1150)
info_paned = config.IntOption(450)
win_maximized = config.BoolOption(False)
auto_select_updates = config.BoolOption(False)
repo_saved = config.BoolOption(False)
repo_enabled = config.KeyListOption([])
archs = config.KeyListOption([])
protected = config.KeyListOption(['yumex-dnf', 'python3-dnfdaemon'])
clean_instonly = config.BoolOption(True)
search_visible = config.BoolOption(False)
installonly_limit = config.PositiveIntOption(3, range_min=2,
names_of_0=["0", "<off>"])
class SessionConf(config.BaseConfig):
""" Yum Extender current session Setting"""
# show newest package version only for current session
newest_only = config.BoolOption(True)
# Clean orphan dependencies for this session
clean_unused = config.BoolOption(False)
# enabled repositories for this session
enabled_repos = config.ListOption([])
clean_instonly = config.BoolOption(False)
color_install = config.Option('#ffffff')
color_update = config.Option('#ffffff')
color_downgrade = config.Option('#ffffff')
color_normal = config.Option('#ffffff')
color_obsolete = config.Option('#ffffff')
class Config(object):
"""
Yum Extender Configuration class
"""
WRITE_ALWAYS = ['autostart', 'update_interval',
'update_startup_delay', 'autocheck_updates',
'update_notify', 'update_showicon']
def __init__(self):
object.__init__(self)
self.conf_dir = os.environ['HOME'] + "/.config/yumex-dnf"
if not os.path.isdir(self.conf_dir):
logger.info("creating config directory : %s", self.conf_dir)
os.makedirs(self.conf_dir, 0o700)
self.conf_file = self.conf_dir + "/yumex.conf"
self.parser = configparser.ConfigParser()
self.conf = YumexConf()
self.session = SessionConf()
self.read()
def read(self):
first_read = False
if not os.path.exists(self.conf_file):
logger.info("creating default config file : %s", self.conf_file)
first_read = True
else:
            with open(self.conf_file, "r") as fp:
                self.parser.read_file(fp)
if not self.parser.has_section('yumex'):
self.parser.add_section('yumex')
self.conf.populate(self.parser, 'yumex')
self.session.populate(self.parser, 'yumex')
if first_read:
self.write()
def write(self):
        with open(self.conf_file, "w") as fp:
            self.conf.write(fp, "yumex", Config.WRITE_ALWAYS)
CONFIG = Config()
|
#!/usr/bin/env python3
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Creates a complete tree of services (every level is filled).
import collections
from typing import Any, Dict, List
import yaml
REQUEST_SIZE = 128
RESPONSE_SIZE = 128
NUM_REPLICAS = 1
# Depth of the tree.
NUM_LEVELS = 3
# Amount of dependent or child services each service has.
NUM_BRANCHES = 3
NUM_SERVICES = sum([NUM_BRANCHES**i for i in range(NUM_LEVELS)])
Service = Dict[str, Any]
def main() -> None:
entrypoint = {
'name': 'svc-0',
'isEntrypoint': True,
}
services_paths = collections.deque([(entrypoint, ['0'])])
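    # Breadth-first construction: each queue entry is (service dict, path of
    # branch indices from the root); children are appended until NUM_SERVICES
    # services exist, yielding the complete NUM_LEVELS-deep tree.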
services = [] # type: List[Service]
for _ in range(NUM_SERVICES):
current_service, current_path = services_paths.popleft()
services.append(current_service)
remaining_services = NUM_SERVICES - len(services) - len(services_paths)
if remaining_services > 0:
child_services = [] # type: List[Service]
for child_service_i in range(
min(NUM_BRANCHES, remaining_services)):
child_path = current_path.copy()
child_path.append(str(child_service_i))
child_service_name = 'svc-{}'.format('-'.join(child_path))
child_service = {
'name': child_service_name,
} # type: Dict[str, Any]
child_services.append(child_service)
services_paths.append((child_service, child_path))
current_service['script'] = _call_all(child_services)
with open('gen.yaml', 'w') as f:
yaml.dump(
{
'defaults': {
'requestSize': REQUEST_SIZE,
'responseSize': RESPONSE_SIZE,
'numReplicas': NUM_REPLICAS,
},
'services': services,
},
f,
default_flow_style=False)
def _call_all(svcs: List[Service]) -> List[List[Dict]]:
return [[{'call': svc['name']} for svc in svcs]]
if __name__ == '__main__':
main()
|
from qwt_common import *
from pythonwidgets import *
from YbPlot import YbPlot
class FlexGraph(YbPlot):
def __init__(self, table):
YbPlot.__init__(self, zoomer = True, panner = True)
self.table = table
#self.startTimer(1000)
if table.model.rowCount() and table.model.columnCount():
self.resetGraph([(0,0)])
else:
self.resetGraph([])
def resetGraph(self, cells, cellsRight = []):
self.clearAll()
for i,cell in enumerate(cells):
r,c = cell
self.addLine(firstAxis = True, color=colorList[i % len(colorList)], label = '%s_%s' % (self.table.model.rLabels[r], self.table.model.cLabels[c]))
for i,cell in enumerate(cellsRight):
r,c = cell
self.addLine(firstAxis = False, color=colorList[i % len(colorList)], label = '%s_%s' % (self.table.model.rLabels[r], self.table.model.cLabels[c]))
self.cells = cells
self.cellsRight = cellsRight
self.updateData()
self.updateGraph()
def updateData(self):
# load past data points
T = self.table.model.arrayHist.shape[0]
for t in range(len(self.x), T):
self.x.append(t) #self.table.model.arrayHistTime[t])
for i,cell in enumerate(self.cells):
r,c = cell
self.y[i].append(self.table.model.arrayHist[t, r, c])
for i,cell in enumerate(self.cellsRight):
r,c = cell
self.yAxis2[i].append(self.table.model.arrayHist[t, r, c])
self.i = T
class NumericTable(QWidget):
def __init__(self, columnLabels = [], rowLabels = [], digits = [], includeGraph = False, showTable = True):
QWidget.__init__(self)
#self.tv = YbTableView()
self.tv = QTableView()
self.tv.verticalHeader().setDefaultSectionSize(18)
self.model = NumericModel(self, columnLabels, rowLabels, digits)
self.model.dataChanged.connect(self.modelUpdated)
self.proxy = NumericModelProxy()
self.proxy.setSourceModel(self.model)
self.tv.setModel(self.proxy)
self.tv.setMinimumSize(1, 1)
# layout
mainLayout = QVBoxLayout()
splitter = QSplitter()
splitter.setOrientation(Qt.Vertical)
# upper layout includes: reset graph button,
        # column chooser (read-only QListWidget), row input (QSpinBox), FlexGraph
self.includeGraph = includeGraph
if includeGraph:
topsplitter = QSplitter()
topsplitter.setOrientation(Qt.Horizontal)
frm = QFrame()
l2 = QVBoxLayout()
self.yaxisCheck = QCheckBox('yaxis')
self.yaxisCheck.stateChanged.connect(self.yaxisChg)
self.maxY = QDoubleSpinBox()
self.maxY.setDecimals(4)
self.maxY.setRange(-999999.0, 999999.0)
self.maxY.valueChanged.connect(self.yaxisChg)
self.minY = QDoubleSpinBox()
self.minY.setDecimals(4)
self.minY.setRange(-999999.0, 999999.0)
self.minY.valueChanged.connect(self.yaxisChg)
l2.addWidget(self.yaxisCheck)
l2.addWidget(self.maxY)
l2.addWidget(self.minY)
frm.setLayout(l2)
self.graph = FlexGraph(self)
topsplitter.addWidget(frm)
topsplitter.addWidget(self.graph)
topsplitter.setSizes([0,10])
topsplitter.setStretchFactor(1,1)
splitter.addWidget(topsplitter)
splitter.setStretchFactor(0, 1)
        # toggle sort button
buttonLayout = QHBoxLayout()
button = QPushButton()
button.clicked.connect(self.toggleSort)
button.setText('Toggle Sort')
buttonLayout.addWidget(button)
if includeGraph:
resetHistButton = QPushButton('Reset History')
resetHistButton.clicked.connect(self.resetHist)
buttonLayout.addWidget(resetHistButton)
resetGraphButton = QPushButton('Reset Graph')
resetGraphButton.clicked.connect(self.resetGraph)
buttonLayout.addWidget(resetGraphButton)
freezeColsCheckBox = QCheckBox('Freeze Columns')
freezeColsCheckBox.stateChanged.connect(self.toggleFreeze)
freezeColsCheckBox.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))
self.frozenCols = False
buttonLayout.addWidget(freezeColsCheckBox)
self.frame2 = QFrame()
layout = QVBoxLayout()
layout.addLayout(buttonLayout)
if showTable:
layout.addWidget(self.tv)
self.frame2.setLayout(layout)
splitter.addWidget(self.frame2)
mainLayout.addWidget(splitter)
self.setLayout(mainLayout)
if isProd():
self.setStyleSheet("NumericTable { background: %s }" % '#FF6A6A')
self.width = self.height = 1
def yaxisChg(self):
if self.yaxisCheck.checkState() == Qt.Checked:
print 'set yaxis', self.minY.value(), self.maxY.value()
self.graph.setAxisScale(0, self.minY.value(), self.maxY.value())
else:
self.graph.setAxisAutoScale(0)
def modelUpdated(self):
if self.tv.isSortingEnabled():
if self.model.currSortCol < self.model.c:
self.model.sort(self.model.currSortCol, self.model.sortOrder)
else:
self.toggleSort()
def getModelRow(self, row):
row = self.proxy.mapToSource(self.proxy.index(row, 0)).row()
return self.model.getModelRowFromTableRow(row)
def resetModel(self, cols, rows, digits = None):
        if digits is None:
digits = [0 for c in cols]
self.proxy.setFilter(None)
self.model.myreset(cols, rows, digits)
#for i in range(self.model.r):
# self.tv.setRowHeight(i, 18)
#self.resizeToContents()
def resetHist(self):
self.model.resetHist()
def resetGraph(self):
if self.includeGraph:
if self.frozenCols:
rows = sorted(list(set([self.getModelRow(idx.row()) for idx in self.tv.selectedIndexes()])))
cells = []
for r in rows:
for c in self.cols:
cells.append((r,c))
else:
cells = [(self.getModelRow(idx.row()), idx.column()) for idx in self.tv.selectedIndexes()]
self.graph.resetGraph(cells)
else:
print 'Graph not included'
def toggleFreeze(self, freeze):
self.frozenCols = (freeze == Qt.Checked)
if self.frozenCols:
self.cols = sorted(list(set([idx.column() for idx in self.tv.selectedIndexes()])))
print self.frozenCols, self.cols
else:
print 'unset freeze'
def resizeToContents(self):
width = self.tv.verticalHeader().width() + 70
for i in range(self.model.c):
width += self.tv.columnWidth(i) + 1
height = self.tv.horizontalHeader().height() + 70
for i in range(self.model.r):
height += self.tv.rowHeight(i) + 1
print 'Resizing to', width, 'by', height
self.resize(width, height)
self.width = width
self.height = height
def sizeHint(self):
return QSize(self.width, self.height)
def toggleSort(self):
self.tv.setSortingEnabled(not self.tv.isSortingEnabled())
self.model.emit(SIGNAL("layoutAboutToBeChanged()"))
self.model.currSortCol = None
self.model.emit(SIGNAL("layoutChanged()"))
# overload this function to set the cell backgrounds
def getBackground(self, row, col):
return QVariant()
# overload this function to set the cell backgrounds
def getColumnHeaderBackground(self, col):
return QVariant()
# overload this function to set the cell checkstate
def getCheckState(self, row, col):
return QVariant()
def getModel(self):
return self.model
class NumericModelProxy(QSortFilterProxyModel):
flist = None
def setFilter(self, flist):
self.flist = flist
self.invalidateFilter()
def filterAcceptsRow(self, row, source_parent):
        if self.flist is None:
return True
r = self.sourceModel().getModelRowFromTableRow(row)
if r >= len(self.flist):
return True
return self.flist[r]
def sort(self, column, order):
self.sourceModel().sort(column, order)
class NumericModel(QAbstractTableModel):
def __init__(self, table, columnLabels, rowLabels, digits):
super(NumericModel,self).__init__()
self.table = table
self.r = 0
self.c = 0
self.currSortCol = None
self.myreset(columnLabels, rowLabels, digits)
#def parent(self, index):
# return QModelIndex()
def myreset(self, columnLabels, rowLabels, digits):
self.cLabels = columnLabels
self.rLabels = rowLabels
rc = self.rowCount()
r = len(self.rLabels)
cc = self.columnCount()
c = len(self.cLabels)
#print 'reset model', r, rc, c, cc, rowLabels, columnLabels, digits
#if r and r > rc: self.beginInsertRows(QModelIndex(), rc, r-1)
#if c and c > cc: self.beginInsertColumns(QModelIndex(), cc, c-1)
self.beginResetModel()
self.r = r
self.c = c
self.digits = digits
self.array = np.zeros((self.r, self.c))
self.arrayHist = np.zeros((0,self.r,self.c))
self.arrayHistTime = []
#self.currSortCol = None
#if r and r > rc: self.endInsertRows()
#if c and c > cc: self.endInsertColumns()
        if self.currSortCol is not None:
self.sort(self.currSortCol, self.sortOrder)
self.endResetModel()
self.beginIndex = self.createIndex(0,0)
self.endIndex = self.createIndex(self.r - 1, self.c - 1)
#self.dataChanged.emit(self.beginIndex, self.endIndex)
def resetHist(self):
self.arrayHist = np.zeros((0,self.r,self.c))
self.arrayHistTime = []
def storeArraySnapshot(self):
a = self.array.copy()
a.shape = (1, a.shape[0], a.shape[1])
self.arrayHist = np.vstack([self.arrayHist, a])
# #if self.arrayHist.shape[0] > 1000:
# # self.arrayHist = self.arrayHist[-1000:,:,:]
# self.arrayHistTime.append(ST.py_local_now_us() / (3600.0 * 1000000.0))
def setValue(self, r, c, val):
self.array[r, c] = float(val)
def updated(self):
self.storeArraySnapshot()
self.dataChanged.emit(self.beginIndex, self.endIndex)
#if self.currSortCol != None:
# self.sort(self.currSortCol, self.sortOrder)
def nosort(self):
self.currSortCol = None
def rowCount(self, parent = QModelIndex()):
return self.r
def columnCount(self, parent = QModelIndex()):
return self.c
def getModelRowFromTableRow(self, row):
#row = self.proxy.mapToSource(QModelIndex(row, 0)).row()
        if self.currSortCol is None:
return row
else:
try:
return self.sortArray[row, self.currSortCol]
            except Exception:
                print 'Error in NT::getModelRowFromTableRow:'
                traceback.print_exc()
return row
def setDigits(self, idx, digits):
try:
self.digits[idx] = digits
        except IndexError:  # self.digits is a list
pass
def data(self, index, role):
if not index.isValid():
return QVariant()
r = self.getModelRowFromTableRow(index.row())
c = index.column()
if role == Qt.DisplayRole:
return pstr(float(self.array[r,c]), self.digits[c])
elif role == Qt.TextAlignmentRole:
return Qt.AlignRight
elif role == Qt.BackgroundRole:
return self.table.getBackground(r, c)
elif role == Qt.CheckStateRole:
return self.table.getCheckState(r, c)
else:
return QVariant()
def headerData(self, i, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return QVariant(self.cLabels[i])
elif orientation == Qt.Vertical and role == Qt.DisplayRole:
            if self.currSortCol is not None:
i = self.sortArray[i,self.currSortCol]
return QVariant(self.rLabels[i])
elif orientation == Qt.Horizontal and role == Qt.BackgroundRole:
return self.table.getColumnHeaderBackground(i)
return QVariant()
def sort(self, col, order):
self.emit(SIGNAL("layoutAboutToBeChanged()"))
self.sortArray = np.argsort(self.array, axis=0, kind='mergesort')
self.currSortCol = col
self.sortOrder = order
if order == Qt.DescendingOrder:
self.sortArray = self.sortArray[::-1,:]
self.emit(SIGNAL("layoutChanged()"))
if __name__ == "__main__":
app = QApplication(sys.argv)
cols = ['a','b','c']
rows = [str(i) for i in range(5)]
digits = [2,2,2]
w = NumericTable(cols, rows, digits, True)
w.resize(500,500)
w.show()
m = w.getModel()
nr = len(rows)
nc = len(cols)
def update():
r = np.random.randn(nr,nc)
for i in range(nr):
for j in range(nc):
m.setValue(i,j,float(i)*float(j)*r[i,j])
m.updated()
timer = QTimer()
timer.timeout.connect(update)
timer.start(500)
sys.exit(app.exec_())
|
from __future__ import absolute_import
import logging
import os
from email.parser import FeedParser # type: ignore
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.basecommand import Command
from pip._internal.status_codes import ERROR, SUCCESS
logger = logging.getLogger(__name__)
class ShowCommand(Command):
"""
Show information about one or more installed packages.
The output is in RFC-compliant mail header format.
"""
name = 'show'
usage = """
%prog [options] <package> ..."""
summary = 'Show information about installed packages.'
ignore_require_venv = True
def __init__(self, *args, **kw):
super(ShowCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-f', '--files',
dest='files',
action='store_true',
default=False,
help='Show the full list of installed files for each package.')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
logger.warning('ERROR: Please provide a package name or names.')
return ERROR
query = args
results = search_packages_info(query)
if not print_results(
results, list_files=options.files, verbose=options.verbose):
return ERROR
return SUCCESS
def search_packages_info(query):
"""
Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Listing installed files requires a
    pip-generated 'installed-files.txt' in the distribution's '.egg-info'
    directory.
"""
installed = {}
for p in pkg_resources.working_set:
installed[canonicalize_name(p.project_name)] = p
query_names = [canonicalize_name(name) for name in query]
for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
package = {
'name': dist.project_name,
'version': dist.version,
'location': dist.location,
'requires': [dep.project_name for dep in dist.requires()],
}
file_list = None
metadata = None
if isinstance(dist, pkg_resources.DistInfoDistribution):
            # RECORD files should be part of the .dist-info metadata
if dist.has_metadata('RECORD'):
lines = dist.get_metadata_lines('RECORD')
paths = [l.split(',')[0] for l in lines]
paths = [os.path.join(dist.location, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('METADATA'):
metadata = dist.get_metadata('METADATA')
else:
# Otherwise use pip's log for .egg-info's
if dist.has_metadata('installed-files.txt'):
paths = dist.get_metadata_lines('installed-files.txt')
paths = [os.path.join(dist.egg_info, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('PKG-INFO'):
metadata = dist.get_metadata('PKG-INFO')
if dist.has_metadata('entry_points.txt'):
entry_points = dist.get_metadata_lines('entry_points.txt')
package['entry_points'] = entry_points
if dist.has_metadata('INSTALLER'):
for line in dist.get_metadata_lines('INSTALLER'):
if line.strip():
package['installer'] = line.strip()
break
# @todo: Should pkg_resources.Distribution have a
# `get_pkg_info` method?
feed_parser = FeedParser()
feed_parser.feed(metadata)
pkg_info_dict = feed_parser.close()
for key in ('metadata-version', 'summary',
'home-page', 'author', 'author-email', 'license'):
package[key] = pkg_info_dict.get(key)
# It looks like FeedParser cannot deal with repeated headers
classifiers = []
for line in metadata.splitlines():
if line.startswith('Classifier: '):
classifiers.append(line[len('Classifier: '):])
package['classifiers'] = classifiers
if file_list:
package['files'] = sorted(file_list)
yield package
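# Illustrative note (not part of pip): iterating the generator above yields one
# dict per matching installed package, e.g.
#
#     for pkg in search_packages_info(['pip']):
#         print(pkg['name'], pkg['version'], pkg['location'])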
def print_results(distributions, list_files=False, verbose=False):
"""
    Print the information from the installed distributions found.
"""
results_printed = False
for i, dist in enumerate(distributions):
results_printed = True
if i > 0:
logger.info("---")
name = dist.get('name', '')
required_by = [
pkg.project_name for pkg in pkg_resources.working_set
if name in [required.name for required in pkg.requires()]
]
logger.info("Name: %s", name)
logger.info("Version: %s", dist.get('version', ''))
logger.info("Summary: %s", dist.get('summary', ''))
logger.info("Home-page: %s", dist.get('home-page', ''))
logger.info("Author: %s", dist.get('author', ''))
logger.info("Author-email: %s", dist.get('author-email', ''))
logger.info("License: %s", dist.get('license', ''))
logger.info("Location: %s", dist.get('location', ''))
logger.info("Requires: %s", ', '.join(dist.get('requires', [])))
logger.info("Required-by: %s", ', '.join(required_by))
if verbose:
logger.info("Metadata-Version: %s",
dist.get('metadata-version', ''))
logger.info("Installer: %s", dist.get('installer', ''))
logger.info("Classifiers:")
for classifier in dist.get('classifiers', []):
logger.info(" %s", classifier)
logger.info("Entry-points:")
for entry in dist.get('entry_points', []):
logger.info(" %s", entry.strip())
if list_files:
logger.info("Files:")
for line in dist.get('files', []):
logger.info(" %s", line.strip())
if "files" not in dist:
logger.info("Cannot locate installed-files.txt")
return results_printed
|
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import paramiko
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts
import cinder.zonemanager.drivers.brocade.fc_zone_constants as ZoneConstant
from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
LOG = logging.getLogger(__name__)
class BrcdFCSanLookupService(FCSanLookupService):
"""The SAN lookup service that talks to Brocade switches.
Version History:
1.0.0 - Initial version
"""
VERSION = "1.0.0"
def __init__(self, **kwargs):
"""Initializing the client."""
super(BrcdFCSanLookupService, self).__init__(**kwargs)
self.configuration = kwargs.get('configuration', None)
self.create_configuration()
self.client = self.create_ssh_client(**kwargs)
def create_configuration(self):
"""Configuration specific to SAN context values."""
config = self.configuration
fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')]
LOG.debug('Fabric Names: %s', fabric_names)
# There can be more than one SAN in the network and we need to
# get credentials for each for SAN context lookup later.
if len(fabric_names) > 0:
self.fabric_configs = fabric_opts.load_fabric_configurations(
fabric_names)
def create_ssh_client(self, **kwargs):
ssh_client = paramiko.SSHClient()
known_hosts_file = kwargs.get('known_hosts_file', None)
if known_hosts_file is None:
ssh_client.load_system_host_keys()
else:
ssh_client.load_host_keys(known_hosts_file)
missing_key_policy = kwargs.get('missing_key_policy', None)
if missing_key_policy is None:
missing_key_policy = paramiko.WarningPolicy()
ssh_client.set_missing_host_key_policy(missing_key_policy)
return ssh_client
def get_device_mapping_from_network(self,
initiator_wwn_list,
target_wwn_list):
"""Provides the initiator/target map for available SAN contexts.
Looks up nameserver of each fc SAN configured to find logged in devices
and returns a map of initiator and target port WWNs for each fabric.
:param initiator_wwn_list: List of initiator port WWN
:param target_wwn_list: List of target port WWN
        :returns: List -- device WWN map in the following format
{
<San name>: {
'initiator_port_wwn_list':
('200000051e55a100', '200000051e55a121'..)
'target_port_wwn_list':
('100000051e55a100', '100000051e55a121'..)
}
}
        :raises: Exception when the connection to the fabric fails
"""
device_map = {}
formatted_target_list = []
formatted_initiator_list = []
fabric_map = {}
fabric_names = self.configuration.fc_fabric_names
fabrics = None
if not fabric_names:
raise exception.InvalidParameterValue(
err=_("Missing Fibre Channel SAN configuration "
"param - fc_fabric_names"))
fabrics = [x.strip() for x in fabric_names.split(',')]
LOG.debug("FC Fabric List: %s", fabrics)
if fabrics:
for t in target_wwn_list:
formatted_target_list.append(self.get_formatted_wwn(t))
for i in initiator_wwn_list:
formatted_initiator_list.append(self.
get_formatted_wwn(i))
for fabric_name in fabrics:
fabric_ip = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_address')
fabric_user = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_user')
fabric_pwd = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_password')
fabric_port = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_port')
# Get name server data from fabric and find the targets
# logged in
nsinfo = ''
try:
LOG.debug("Getting name server data for "
"fabric %s", fabric_ip)
self.client.connect(
fabric_ip, fabric_port, fabric_user, fabric_pwd)
nsinfo = self.get_nameserver_info()
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error(_("Failed collecting name server info from "
"fabric %s") % fabric_ip)
except Exception as e:
msg = _("SSH connection failed "
"for %(fabric)s with error: %(err)s"
) % {'fabric': fabric_ip, 'err': e}
LOG.error(msg)
raise exception.FCSanLookupServiceException(message=msg)
finally:
self.client.close()
LOG.debug("Lookup service:nsinfo-%s", nsinfo)
LOG.debug("Lookup service:initiator list from "
"caller-%s", formatted_initiator_list)
LOG.debug("Lookup service:target list from "
"caller-%s", formatted_target_list)
visible_targets = filter(lambda x: x in formatted_target_list,
nsinfo)
visible_initiators = filter(lambda x: x in
formatted_initiator_list, nsinfo)
if visible_targets:
LOG.debug("Filtered targets is: %s", visible_targets)
# getting rid of the : before returning
for idx, elem in enumerate(visible_targets):
elem = str(elem).replace(':', '')
visible_targets[idx] = elem
else:
LOG.debug("No targets are in the nameserver for SAN %s",
fabric_name)
if visible_initiators:
# getting rid of the : before returning ~sk
for idx, elem in enumerate(visible_initiators):
elem = str(elem).replace(':', '')
visible_initiators[idx] = elem
else:
LOG.debug("No initiators are in the nameserver "
"for SAN %s", fabric_name)
fabric_map = {
'initiator_port_wwn_list': visible_initiators,
'target_port_wwn_list': visible_targets
}
device_map[fabric_name] = fabric_map
LOG.debug("Device map for SAN context: %s", device_map)
return device_map
def get_nameserver_info(self):
"""Get name server data from fabric.
        This method will return the connected node port WWN list (local
        and remote) for the given switch fabric.
"""
cli_output = None
nsinfo_list = []
try:
cli_output = self._get_switch_data(ZoneConstant.NS_SHOW)
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error(_("Failed collecting nsshow info for fabric"))
if cli_output:
nsinfo_list = self._parse_ns_output(cli_output)
try:
cli_output = self._get_switch_data(ZoneConstant.NS_CAM_SHOW)
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error(_("Failed collecting nscamshow"))
if cli_output:
nsinfo_list.extend(self._parse_ns_output(cli_output))
LOG.debug("Connector returning nsinfo-%s", nsinfo_list)
return nsinfo_list
def _get_switch_data(self, cmd):
stdin, stdout, stderr = None, None, None
utils.check_ssh_injection([cmd])
try:
stdin, stdout, stderr = self.client.exec_command(cmd)
switch_data = stdout.readlines()
except paramiko.SSHException as e:
msg = (_("SSH Command failed with error '%(err)s' "
"'%(command)s'") % {'err': e,
'command': cmd})
LOG.error(msg)
raise exception.FCSanLookupServiceException(message=msg)
finally:
if (stdin):
stdin.flush()
stdin.close()
if (stdout):
stdout.close()
if (stderr):
stderr.close()
return switch_data
def _parse_ns_output(self, switch_data):
"""Parses name server data.
Parses nameserver raw data and adds the device port wwns to the list
        :returns: list of device port WWNs from the ns info
"""
nsinfo_list = []
for line in switch_data:
if not(" NL " in line or " N " in line):
continue
linesplit = line.split(';')
if len(linesplit) > 2:
node_port_wwn = linesplit[2]
nsinfo_list.append(node_port_wwn)
else:
msg = _("Malformed nameserver string: %s") % line
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
return nsinfo_list
def get_formatted_wwn(self, wwn_str):
"""Utility API that formats WWN to insert ':'."""
if (len(wwn_str) != 16):
return wwn_str.lower()
else:
return (':'.join([wwn_str[i:i + 2]
for i in range(0, len(wwn_str), 2)])).lower()
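# Example (illustrative, not part of the driver): get_formatted_wwn() turns a bare
# 16-character WWN such as '200000051E55A100' into the colon-separated, lower-case
# form '20:00:00:05:1e:55:a1:00'; strings of any other length are only lower-cased.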
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from oslo_config import cfg
__all__ = [
"RUNNER_NAME_WHITELIST",
"MANIFEST_FILE_NAME",
"LOCAL_RUNNER_DEFAULT_ACTION_TIMEOUT",
"REMOTE_RUNNER_DEFAULT_ACTION_TIMEOUT",
"REMOTE_RUNNER_DEFAULT_REMOTE_DIR",
"REMOTE_RUNNER_PRIVATE_KEY_HEADER",
"PYTHON_RUNNER_DEFAULT_ACTION_TIMEOUT",
"PYTHON_RUNNER_INVALID_ACTION_STATUS_EXIT_CODE",
"WINDOWS_RUNNER_DEFAULT_ACTION_TIMEOUT",
"COMMON_ACTION_ENV_VARIABLE_PREFIX",
"COMMON_ACTION_ENV_VARIABLES",
"DEFAULT_SSH_PORT",
"RUNNERS_NAMESPACE",
]
DEFAULT_SSH_PORT = 22
# A regex restricting the characters allowed in a runner name
RUNNER_NAME_WHITELIST = r"^[A-Za-z0-9_-]+"
# Manifest file name for runners
MANIFEST_FILE_NAME = "runner.yaml"
# Local runner
LOCAL_RUNNER_DEFAULT_ACTION_TIMEOUT = 60
# Remote runner
REMOTE_RUNNER_DEFAULT_ACTION_TIMEOUT = 60
try:
REMOTE_RUNNER_DEFAULT_REMOTE_DIR = cfg.CONF.ssh_runner.remote_dir
except Exception:
REMOTE_RUNNER_DEFAULT_REMOTE_DIR = "/tmp"
REMOTE_RUNNER_PRIVATE_KEY_HEADER = "PRIVATE KEY-----".lower()
# Python runner
# Default timeout (in seconds) for actions executed by Python runner
PYTHON_RUNNER_DEFAULT_ACTION_TIMEOUT = 10 * 60
# Exit code with which the Python runner wrapper script exits if the Python
# action returns an invalid status from the run() method
PYTHON_RUNNER_INVALID_ACTION_STATUS_EXIT_CODE = 220
PYTHON_RUNNER_DEFAULT_LOG_LEVEL = "DEBUG"
# Windows runner
WINDOWS_RUNNER_DEFAULT_ACTION_TIMEOUT = 10 * 60
# Prefix for common st2 environment variables which are available to the actions
COMMON_ACTION_ENV_VARIABLE_PREFIX = "ST2_ACTION_"
# Common st2 environment variables which are available to the actions
COMMON_ACTION_ENV_VARIABLES = [
"ST2_ACTION_PACK_NAME",
"ST2_ACTION_EXECUTION_ID",
"ST2_ACTION_API_URL",
"ST2_ACTION_AUTH_TOKEN",
]
# Namespaces for dynamically loaded runner modules
RUNNERS_NAMESPACE = "st2common.runners.runner"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2014 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""
The mux is used to open one or more devices and mix the inputs from all
of them into one "input" for the Crazyflie and UI.
"""
import logging
__author__ = 'Bitcraze AB'
__all__ = ['InputMux']
logger = logging.getLogger(__name__)
class InputMux(object):
def __init__(self, input_layer):
self._devs = {"Device": None}
self.name = "N/A"
self.input = input_layer
def _open_new_device(self, dev, role):
# Silently close device if open as other role
for r in self._devs:
if self._devs[r]:
if self._devs[r] == dev:
self._devs[r] = None
dev.close()
# First set role to None to stop reading
old_dev = self._devs[role]
self._devs[role] = None
if old_dev:
old_dev.close()
# Open the new device before attaching it to a role
dev.open()
self._devs[role] = dev
def supported_roles(self):
return list(self._devs.keys())
def add_device(self, dev, role):
logger.info("Adding device {} to MUX {}".format(dev.name, self.name))
self._open_new_device(dev, role)
def pause(self):
for d in [key for key in list(self._devs.keys()) if self._devs[key]]:
self._devs[d].close()
def resume(self):
for d in [key for key in list(self._devs.keys()) if self._devs[key]]:
self._devs[d].open()
def close(self):
"""Close down the MUX and close all it's devices"""
for d in [key for key in list(self._devs.keys()) if self._devs[key]]:
self._devs[d].close()
self._devs[d] = None
def read(self):
return None
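if __name__ == "__main__":
    # Illustrative only (not part of the original client code): exercise InputMux
    # with a tiny stand-in device to show the open/close bookkeeping performed by
    # add_device(), pause(), resume() and close().
    class _FakeDevice(object):
        name = "fake"
        def open(self):
            print("fake device opened")
        def close(self):
            print("fake device closed")
    mux = InputMux(input_layer=None)
    mux.add_device(_FakeDevice(), "Device")
    mux.pause()   # closes every attached device
    mux.resume()  # re-opens them
    mux.close()   # closes everything and detaches it from the mux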
|
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
import iss_ui
class ActionDeps:
def __init__(self, options):
# self.step_precedence contains the dependencies among the export steps.
self.step_precedence = {
'orgs': [''],
'packages': [''],
'source-packages': [''],
'errata': [''],
'kickstarts': [''],
'rpms': [''],
# 'srpms' : ['channels'],
'channels': ['channel-families'],
'channel-families': ['blacklists'],
'blacklists': ['arches'],
'short': ['channels'],
'arches': ['arches-extra'],
'arches-extra': [''],
}
# self.step_hierarchy lists the export steps in the order they need to be run.
self.step_hierarchy = [
'orgs',
'channel-families',
'arches',
'arches-extra',
'channels',
'blacklists',
'short',
'rpms',
'packages',
'errata',
'kickstarts',
]
self.options = options
self.action_dict = {'blacklists': 0}
def list_steps(self):
print("LIST OF STEPS:")
for step in self.step_hierarchy:
print(step)
sys.exit(0)
# Contains the logic for the --step option
def handle_step_option(self):
# If the user didn't use --step, set the last step to the end of self.step_hierarchy.
if not self.options.step:
self.options.step = self.step_hierarchy[-1]
# Make sure that the step entered by the user is actually a step.
if self.options.step not in self.step_hierarchy:
sys.stderr.write("Error: '%s' is not a valid step.\n" % self.options.step)
sys.exit(-1)
# Turn on all of the steps up to the option set as self.options.step.
for step in self.step_hierarchy:
self.action_dict[step] = 1
if step == self.options.step:
break
# This will set the rest of the steps to 0.
for step in self.step_hierarchy:
self.action_dict[step] = step in self.action_dict
    # Handles the logic for the --no-rpms, --no-packages, --no-errata, --no-kickstarts, and --list-channels options.
def handle_options(self):
if self.options.list_steps:
self.list_steps()
if self.options.no_rpms:
self.action_dict['rpms'] = 0
if self.options.no_packages:
self.action_dict['packages'] = 0
if self.options.no_errata:
self.action_dict['errata'] = 0
if self.options.no_kickstarts:
self.action_dict['kickstarts'] = 0
if not self.options.all_orgs and not self.options.org:
self.action_dict['orgs'] = 0
if self.options.list_channels:
self.action_dict['channels'] = 1
self.action_dict['blacklists'] = 0
self.action_dict['arches'] = 0
self.action_dict['channel-families'] = 1
    # This method uses self.step_precedence to figure out if a step needs to be turned off.
def turn_off_dep_steps(self, step):
for dependent in self.step_precedence[step]:
if dependent in self.action_dict:
self.action_dict[dependent] = 0
# This method will call turn_off_dep_steps if the step is off or not present in self.action_dict.
def handle_step_dependents(self):
for step in self.step_hierarchy:
if step in self.action_dict:
if self.action_dict[step] == 0:
self.turn_off_dep_steps(step)
else:
self.turn_off_dep_steps(step)
# This will return the step_hierarchy and the action_dict.
def get_actions(self):
self.handle_step_option()
self.handle_options()
self.handle_step_dependents()
return self.step_hierarchy, self.action_dict
if __name__ == "__main__":
a = iss_ui.UI()
b = ActionDeps(a)
print(b.get_actions())
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of x86 operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
from tvm import topi
from .generic import *
from .. import op as _op
@conv2d_strategy.register("intel_graphics")
def conv2d_strategy_intel_graphics(attrs, inputs, out_type, target):
"""conv2d intel graphics strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
dilation_h, dilation_w = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.intel_graphics.conv2d_nchw),
wrap_topi_schedule(topi.intel_graphics.schedule_conv2d_nchw),
name="conv2d_nchw.intel_graphics",
)
# conv2d_NCHWc won't work without alter op layout pass
# TODO(@Laurawly): fix this
strategy.add_implementation(
wrap_compute_conv2d(topi.intel_graphics.conv2d_NCHWc, True, True),
wrap_topi_schedule(topi.intel_graphics.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.intel_graphics",
plevel=5,
)
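            # Note (added for clarity): plevel=5 gives the NCHWc variant a lower
            # priority than the default (10), so the plain conv2d_nchw implementation
            # above is normally selected; the NCHWc path is intended to apply once the
            # alter-op-layout pass rewrites the op (see the TODO above).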
else:
raise RuntimeError("Unsupported conv2d layout {} for intel graphics".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.intel_graphics.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.intel_graphics.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.intel_graphics",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
raise RuntimeError("group_conv2d is not supported for intel graphics")
return strategy
@conv2d_NCHWc_strategy.register("intel_graphics")
def conv2d_NCHWc_strategy_intel_graphics(attrs, inputs, out_type, target):
"""conv2d_NCHWc intel_graphics strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d(topi.intel_graphics.conv2d_NCHWc, True, True),
wrap_topi_schedule(topi.intel_graphics.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.intel_graphics",
)
return strategy
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_software_update
short_description: Manage the software update settings of a BIG-IP
description:
- Manage the software update settings of a BIG-IP.
version_added: "2.5"
options:
auto_check:
description:
- Specifies whether to automatically check for updates on the F5
Networks downloads server.
type: bool
auto_phone_home:
description:
- Specifies whether to automatically send phone home data to the
F5 Networks PhoneHome server.
type: bool
frequency:
description:
- Specifies the schedule for the automatic update check.
choices:
- daily
- monthly
- weekly
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Enable automatic update checking
bigip_software_update:
auto_check: yes
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Disable automatic update checking and phoning home
bigip_software_update:
auto_check: no
auto_phone_home: no
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
auto_check:
description: Whether the system checks for updates automatically.
returned: changed
type: bool
sample: True
auto_phone_home:
description: Whether the system automatically sends phone home data.
returned: changed
type: bool
sample: True
frequency:
description: Frequency of auto update checks
returned: changed
type: string
sample: weekly
'''
from ansible.module_utils.basic import AnsibleModule
HAS_DEVEL_IMPORTS = False
try:
# Sideband repository used for dev
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fqdn_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
HAS_DEVEL_IMPORTS = True
except ImportError:
# Upstream Ansible
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fqdn_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'autoCheck': 'auto_check',
'autoPhonehome': 'auto_phone_home'
}
api_attributes = [
'autoCheck', 'autoPhonehome', 'frequency'
]
updatables = [
'auto_check', 'auto_phone_home', 'frequency'
]
returnables = [
'auto_check', 'auto_phone_home', 'frequency'
]
class ApiParameters(Parameters):
@property
def auto_check(self):
if self._values['auto_check'] is None:
return None
return self._values['auto_check']
class ModuleParameters(Parameters):
@property
def auto_check(self):
if self._values['auto_check'] is None:
return None
elif self._values['auto_check'] is True:
return 'enabled'
else:
return 'disabled'
@property
def auto_phone_home(self):
if self._values['auto_phone_home'] is None:
return None
elif self._values['auto_phone_home'] is True:
return 'enabled'
else:
return 'disabled'
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def auto_check(self):
if self._values['auto_check'] == 'enabled':
return True
elif self._values['auto_check'] == 'disabled':
return False
@property
def auto_phone_home(self):
if self._values['auto_phone_home'] == 'enabled':
return True
elif self._values['auto_phone_home'] == 'disabled':
return False
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
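    # Example (added for clarity): if want.auto_check is 'enabled' and
    # have.auto_check is 'disabled', compare('auto_check') returns 'enabled';
    # when the two values already match, it returns None and no change is recorded.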
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def exec_module(self):
result = dict()
try:
changed = self.update()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
params = self.changes.api_params()
result = self.client.api.tm.sys.software.update.load()
result.modify(**params)
def read_current_from_device(self):
resource = self.client.api.tm.sys.software.update.load()
result = resource.attrs
return ApiParameters(params=result)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
auto_check=dict(
type='bool'
),
auto_phone_home=dict(
type='bool'
),
frequency=dict(
choices=['daily', 'monthly', 'weekly']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
"""
Provides full versioning CRUD and representation for collections of xblocks (e.g., courses, modules, etc).
Representation:
* course_index: a dictionary:
** '_id': a unique id which cannot change,
** 'org': the org's id. Only used for searching not identity,
** 'course': the course's catalog number
** 'run': the course's run id,
** 'edited_by': user_id of user who created the original entry,
** 'edited_on': the datetime of the original creation,
** 'versions': versions_dict: {branch_id: structure_id, ...}
** 'search_targets': a dict of search key and value. For example, wiki_slug. Add any fields whose edits
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict
* structure:
** '_id': an ObjectId (guid),
** 'root': BlockKey (the block_type and block_id of the root block in the 'blocks' dictionary)
** 'previous_version': the structure from which this one was derived. For published courses, this
points to the previously published version of the structure not the draft published to this.
** 'original_version': the original structure id in the previous_version relation. Is a pseudo object
identifier enabling quick determination if 2 structures have any shared history,
** 'edited_by': user_id of the user whose change caused the creation of this structure version,
** 'edited_on': the datetime for the change causing this creation of this structure version,
** 'blocks': dictionary of xblocks in this structure:
*** BlockKey: key mapping to each BlockData:
*** BlockData: object containing the following attributes:
**** 'block_type': the xblock type id
**** 'definition': the db id of the record containing the content payload for this xblock
**** 'fields': the Scope.settings and children field values
***** 'children': This is stored as a list of (block_type, block_id) pairs
**** 'defaults': Scope.settings default values copied from a template block (used e.g. when
blocks are copied from a library to a course)
**** 'edit_info': EditInfo object:
***** 'edited_on': when was this xblock's fields last changed (will be edited_on value of
update_version structure)
***** 'edited_by': user_id for who changed this xblock last (will be edited_by value of
update_version structure)
***** 'update_version': the guid for the structure where this xblock got its current field
values. This may point to a structure not in this structure's history (e.g., to a draft
branch from which this version was published.)
***** 'previous_version': the guid for the structure which previously changed this xblock
(will be the previous value of update_version; so, may point to a structure not in this
structure's history.)
                ***** 'source_version': the guid for the structure from which this block was copied/published
* definition: shared content with revision history for xblock content fields
** '_id': definition_id (guid),
** 'block_type': xblock type id
** 'fields': scope.content (and possibly other) field values.
** 'edit_info': dictionary:
*** 'edited_by': user_id whose edit caused this version of the definition,
*** 'edited_on': datetime of the change causing this version
*** 'previous_version': the definition_id of the previous version of this definition
*** 'original_version': definition_id of the root of the previous version relation on this
definition. Acts as a pseudo-object identifier.
"""
import copy
import datetime
import hashlib
import logging
from collections import defaultdict
from importlib import import_module
import six
from bson.objectid import ObjectId
from ccx_keys.locator import CCXBlockUsageLocator, CCXLocator
from contracts import contract, new_contract
from mongodb_proxy import autoretry_read
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import (
BlockUsageLocator,
CourseLocator,
DefinitionLocator,
LibraryLocator,
LocalId,
VersionTree
)
from path import Path as path
from pytz import UTC
from xblock.core import XBlock
from xblock.fields import Reference, ReferenceList, ReferenceValueDict, Scope
from xmodule.assetstore import AssetMetadata
from xmodule.course_module import CourseSummary
from xmodule.error_module import ErrorDescriptor
from xmodule.errortracker import null_error_tracker
from xmodule.library_content_module import LibrarySummary
from xmodule.modulestore import (
BlockData,
BulkOperationsMixin,
BulkOpsRecord,
ModuleStoreEnum,
ModuleStoreWriteBase,
SortedAssetList,
inheritance
)
from xmodule.modulestore.exceptions import (
DuplicateCourseError,
DuplicateItemError,
InsufficientSpecificationError,
MultipleCourseBlocksFound,
MultipleLibraryBlocksFound,
VersionConflictError
)
from xmodule.modulestore.split_mongo import BlockKey, CourseEnvelope
from xmodule.modulestore.split_mongo.mongo_connection import DuplicateKeyError, MongoConnection
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES
from xmodule.partitions.partitions_service import PartitionService
from ..exceptions import ItemNotFoundError
from .caching_descriptor_system import CachingDescriptorSystem
log = logging.getLogger(__name__)
# ==============================================================================
#
# Known issue:
# Inheritance for cached kvs doesn't work on edits. Use case.
# 1) attribute foo is inheritable
# 2) g.children = [p], p.children = [a]
# 3) g.foo = 1 on load
# 4) if g.foo > 0, if p.foo > 0, if a.foo > 0 all eval True
# 5) p.foo = -1
# 6) g.foo > 0, p.foo <= 0 all eval True BUT
# 7) BUG: a.foo > 0 still evals True but should be False
# 8) reread and everything works right
# 9) p.del(foo), p.foo > 0 is True! works
# 10) BUG: a.foo < 0!
# Local fix won't permanently work b/c xblock may cache a.foo...
#
# ==============================================================================
# When blacklists are this, all children should be excluded
EXCLUDE_ALL = '*'
new_contract('BlockUsageLocator', BlockUsageLocator)
new_contract('BlockKey', BlockKey)
new_contract('XBlock', XBlock)
class SplitBulkWriteRecord(BulkOpsRecord):
def __init__(self):
super(SplitBulkWriteRecord, self).__init__()
self.initial_index = None
self.index = None
self.structures = {}
self.structures_in_db = set()
# dict(version_guid, dict(BlockKey, module))
self.modules = defaultdict(dict)
self.definitions = {}
self.definitions_in_db = set()
self.course_key = None
# TODO: This needs to track which branches have actually been modified/versioned,
# so that copying one branch to another doesn't update the original branch.
@property
def dirty_branches(self):
"""
Return a list of which branch version ids differ from what was stored
in the database at the beginning of this bulk operation.
"""
# If no course index has been set, then no branches have changed
if self.index is None:
return []
# If there was no index in the database to start with, then all branches
# are dirty by definition
if self.initial_index is None:
return list(self.index.get('versions', {}).keys())
# Return branches whose ids differ between self.index and self.initial_index
return [
branch
for branch, _id
in self.index.get('versions', {}).items()
if self.initial_index.get('versions', {}).get(branch) != _id
]
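    # Example (added for clarity): if initial_index['versions'] was
    # {'draft-branch': A, 'published-branch': B} and index['versions'] is now
    # {'draft-branch': C, 'published-branch': B}, dirty_branches returns ['draft-branch'].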
def structure_for_branch(self, branch):
return self.structures.get(self.index.get('versions', {}).get(branch))
def set_structure_for_branch(self, branch, structure):
if self.index is not None:
self.index.setdefault('versions', {})[branch] = structure['_id']
self.structures[structure['_id']] = structure
def __repr__(self):
return u"SplitBulkWriteRecord<{!r}, {!r}, {!r}, {!r}, {!r}>".format(
self._active_count,
self.initial_index,
self.index,
self.structures,
self.structures_in_db,
)
class SplitBulkWriteMixin(BulkOperationsMixin):
"""
This implements the :meth:`bulk_operations` modulestore semantics for the :class:`SplitMongoModuleStore`.
In particular, it implements :meth:`_begin_bulk_operation` and
:meth:`_end_bulk_operation` to provide the external interface, and then exposes a set of methods
for interacting with course_indexes and structures that can be used by :class:`SplitMongoModuleStore`.
Internally, this mixin records the set of all active bulk operations (keyed on the active course),
and only writes those values to ``self.mongo_connection`` when :meth:`_end_bulk_operation` is called.
If a bulk write operation isn't active, then the changes are immediately written to the underlying
mongo_connection.
"""
_bulk_ops_record_type = SplitBulkWriteRecord
def _get_bulk_ops_record(self, course_key, ignore_case=False):
"""
Return the :class:`.SplitBulkWriteRecord` for this course.
"""
# handle split specific things and defer to super otherwise
if course_key is None:
return self._bulk_ops_record_type()
if not isinstance(course_key, (CourseLocator, LibraryLocator)):
raise TypeError(u'{!r} is not a CourseLocator or LibraryLocator'.format(course_key))
# handle version_guid based retrieval locally
if course_key.org is None or course_key.course is None or course_key.run is None:
return self._active_bulk_ops.records[
course_key.replace(org=None, course=None, run=None, branch=None)
]
# handle ignore case and general use
return super(SplitBulkWriteMixin, self)._get_bulk_ops_record(
course_key.replace(branch=None, version_guid=None), ignore_case
)
def _clear_bulk_ops_record(self, course_key):
"""
Clear the record for this course
"""
if not isinstance(course_key, (CourseLocator, LibraryLocator)):
raise TypeError('{!r} is not a CourseLocator or LibraryLocator'.format(course_key))
if course_key.org and course_key.course and course_key.run:
del self._active_bulk_ops.records[course_key.replace(branch=None, version_guid=None)]
else:
del self._active_bulk_ops.records[
course_key.replace(org=None, course=None, run=None, branch=None)
]
def _start_outermost_bulk_operation(self, bulk_write_record, course_key, ignore_case=False):
"""
Begin a bulk write operation on course_key.
"""
bulk_write_record.initial_index = self.db_connection.get_course_index(course_key, ignore_case=ignore_case)
# Ensure that any edits to the index don't pollute the initial_index
bulk_write_record.index = copy.deepcopy(bulk_write_record.initial_index)
bulk_write_record.course_key = course_key
def _end_outermost_bulk_operation(self, bulk_write_record, structure_key):
"""
End the active bulk write operation on structure_key (course or library key).
"""
dirty = False
# If the content is dirty, then update the database
for _id in six.viewkeys(bulk_write_record.structures) - bulk_write_record.structures_in_db:
dirty = True
try:
self.db_connection.insert_structure(bulk_write_record.structures[_id], bulk_write_record.course_key)
except DuplicateKeyError:
# We may not have looked up this structure inside this bulk operation, and thus
# didn't realize that it was already in the database. That's OK, the store is
# append only, so if it's already been written, we can just keep going.
log.debug("Attempted to insert duplicate structure %s", _id)
for _id in six.viewkeys(bulk_write_record.definitions) - bulk_write_record.definitions_in_db:
dirty = True
try:
self.db_connection.insert_definition(bulk_write_record.definitions[_id], bulk_write_record.course_key)
except DuplicateKeyError:
# We may not have looked up this definition inside this bulk operation, and thus
# didn't realize that it was already in the database. That's OK, the store is
# append only, so if it's already been written, we can just keep going.
log.debug("Attempted to insert duplicate definition %s", _id)
if bulk_write_record.index is not None and bulk_write_record.index != bulk_write_record.initial_index:
dirty = True
if bulk_write_record.initial_index is None:
self.db_connection.insert_course_index(bulk_write_record.index, bulk_write_record.course_key)
else:
self.db_connection.update_course_index(
bulk_write_record.index,
from_index=bulk_write_record.initial_index,
course_context=bulk_write_record.course_key
)
return dirty
def get_course_index(self, course_key, ignore_case=False):
"""
Return the index for course_key.
"""
if self._is_in_bulk_operation(course_key, ignore_case):
return self._get_bulk_ops_record(course_key, ignore_case).index
else:
return self.db_connection.get_course_index(course_key, ignore_case)
def delete_course_index(self, course_key):
"""
Delete the course index from cache and the db
"""
if self._is_in_bulk_operation(course_key, False):
self._clear_bulk_ops_record(course_key)
self.db_connection.delete_course_index(course_key)
def insert_course_index(self, course_key, index_entry):
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.index = index_entry
else:
self.db_connection.insert_course_index(index_entry, course_key)
def update_course_index(self, course_key, updated_index_entry):
"""
Change the given course's index entry.
Note, this operation can be dangerous and break running courses.
Does not return anything useful.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.index = updated_index_entry
else:
self.db_connection.update_course_index(updated_index_entry, course_context=course_key)
def get_structure(self, course_key, version_guid):
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
structure = bulk_write_record.structures.get(version_guid)
# The structure hasn't been loaded from the db yet, so load it
if structure is None:
structure = self.db_connection.get_structure(version_guid, course_key)
bulk_write_record.structures[version_guid] = structure
if structure is not None:
bulk_write_record.structures_in_db.add(version_guid)
return structure
else:
# cast string to ObjectId if necessary
version_guid = course_key.as_object_id(version_guid)
return self.db_connection.get_structure(version_guid, course_key)
def update_structure(self, course_key, structure):
"""
Update a course structure, respecting the current bulk operation status
(no data will be written to the database if a bulk operation is active.)
"""
self._clear_cache(structure['_id'])
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.structures[structure['_id']] = structure
else:
self.db_connection.insert_structure(structure, course_key)
def get_cached_block(self, course_key, version_guid, block_id):
"""
If there's an active bulk_operation, see if it's cached this module and just return it
Don't do any extra work to get the ones which are not cached. Make the caller do the work & cache them.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
return bulk_write_record.modules[version_guid].get(block_id, None)
else:
return None
def cache_block(self, course_key, version_guid, block_key, block):
"""
        The counterpart to :meth:`get_cached_block`, which caches a block.
Returns nothing.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.modules[version_guid][block_key] = block
def decache_block(self, course_key, version_guid, block_key):
"""
Write operations which don't write from blocks must remove the target blocks from the cache.
Returns nothing.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
try:
del bulk_write_record.modules[version_guid][block_key]
except KeyError:
pass
def get_definition(self, course_key, definition_guid):
"""
Retrieve a single definition by id, respecting the active bulk operation
on course_key.
Args:
course_key (:class:`.CourseKey`): The course being operated on
definition_guid (str or ObjectID): The id of the definition to load
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
definition = bulk_write_record.definitions.get(definition_guid)
# The definition hasn't been loaded from the db yet, so load it
if definition is None:
definition = self.db_connection.get_definition(definition_guid, course_key)
bulk_write_record.definitions[definition_guid] = definition
if definition is not None:
bulk_write_record.definitions_in_db.add(definition_guid)
return definition
else:
# cast string to ObjectId if necessary
definition_guid = course_key.as_object_id(definition_guid)
return self.db_connection.get_definition(definition_guid, course_key)
def get_definitions(self, course_key, ids):
"""
        Return all definitions that are specified in ``ids``.
If a definition with the same id is in both the cache and the database,
the cached version will be preferred.
Arguments:
course_key (:class:`.CourseKey`): The course that these definitions are being loaded
for (to respect bulk operations).
ids (list): A list of definition ids
"""
definitions = []
ids = set(ids)
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
# Only query for the definitions that aren't already cached.
for definition in bulk_write_record.definitions.values():
definition_id = definition.get('_id')
if definition_id in ids:
ids.remove(definition_id)
definitions.append(definition)
if len(ids):
# Query the db for the definitions.
defs_from_db = list(self.db_connection.get_definitions(list(ids), course_key))
defs_dict = {d.get('_id'): d for d in defs_from_db}
# Add the retrieved definitions to the cache.
bulk_write_record.definitions_in_db.update(six.iterkeys(defs_dict))
bulk_write_record.definitions.update(defs_dict)
definitions.extend(defs_from_db)
return definitions
def update_definition(self, course_key, definition):
"""
Update a definition, respecting the current bulk operation status
(no data will be written to the database if a bulk operation is active.)
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.definitions[definition['_id']] = definition
else:
self.db_connection.insert_definition(definition, course_key)
def version_structure(self, course_key, structure, user_id):
"""
Copy the structure and update the history info (edited_by, edited_on, previous_version)
"""
if course_key.branch is None:
raise InsufficientSpecificationError(course_key)
bulk_write_record = self._get_bulk_ops_record(course_key)
# If we have an active bulk write, and it's already been edited, then just use that structure
if bulk_write_record.active and course_key.branch in bulk_write_record.dirty_branches:
return bulk_write_record.structure_for_branch(course_key.branch)
# Otherwise, make a new structure
new_structure = copy.deepcopy(structure)
new_structure['_id'] = ObjectId()
new_structure['previous_version'] = structure['_id']
new_structure['edited_by'] = user_id
new_structure['edited_on'] = datetime.datetime.now(UTC)
new_structure['schema_version'] = self.SCHEMA_VERSION
# If we're in a bulk write, update the structure used there, and mark it as dirty
if bulk_write_record.active:
bulk_write_record.set_structure_for_branch(course_key.branch, new_structure)
return new_structure
def version_block(self, block_data, user_id, update_version):
"""
Update the block_data object based on it having been edited.
"""
if block_data.edit_info.update_version == update_version:
return
original_usage = block_data.edit_info.original_usage
original_usage_version = block_data.edit_info.original_usage_version
block_data.edit_info.edited_on = datetime.datetime.now(UTC)
block_data.edit_info.edited_by = user_id
block_data.edit_info.previous_version = block_data.edit_info.update_version
block_data.edit_info.update_version = update_version
if original_usage:
block_data.edit_info.original_usage = original_usage
block_data.edit_info.original_usage_version = original_usage_version
def find_matching_course_indexes(self, branch=None, search_targets=None, org_target=None, course_keys=None):
"""
Find the course_indexes which have the specified branch and search_targets. An optional org_target
can be specified to apply an ORG filter to return only the courses that are part of
that ORG.
Returns:
a Cursor if there are no changes in flight or a list if some have changed in current bulk op
"""
indexes = self.db_connection.find_matching_course_indexes(
branch,
search_targets,
org_target,
course_keys=course_keys)
indexes = self._add_indexes_from_active_records(
indexes,
branch,
search_targets,
org_target,
course_keys=course_keys
)
return indexes
def _add_indexes_from_active_records(
self,
course_indexes,
branch=None,
search_targets=None,
org_target=None,
course_keys=None
):
"""
        Add any indexes that are being built but not yet persisted, or are in the process of being updated
"""
def _replace_or_append_index(altered_index):
"""
If the index is already in indexes, replace it. Otherwise, append it.
"""
for index, existing in enumerate(course_indexes):
if all(existing[attr] == altered_index[attr] for attr in ['org', 'course', 'run']):
course_indexes[index] = altered_index
return
course_indexes.append(altered_index)
for _, record in self._active_records:
if branch and branch not in record.index.get('versions', {}):
continue
if search_targets:
if any(
'search_targets' not in record.index or
field not in record.index['search_targets'] or
record.index['search_targets'][field] != value
for field, value in six.iteritems(search_targets)
):
continue
# if we've specified a filter by org,
# make sure we've honored that filter when
# integrating in-transit records
if org_target:
if record.index['org'] != org_target:
continue
if course_keys:
index_exists_in_active_records = False
for course_key in course_keys:
if all(record.index[key_attr] == getattr(course_key, key_attr)
for key_attr in ['org', 'course', 'run']):
index_exists_in_active_records = True
break
if not index_exists_in_active_records:
continue
if not hasattr(course_indexes, 'append'): # Just in time conversion to list from cursor
course_indexes = list(course_indexes)
_replace_or_append_index(record.index)
return course_indexes
def find_courselike_blocks_by_id(self, ids, block_type):
"""
        Find all structures that are specified in `ids`. Return blocks matching block_type.
Arguments:
ids (list): A list of structure ids
block_type: type of block to return
"""
ids = set(ids)
return self.db_connection.find_courselike_blocks_by_id(list(ids), block_type)
def find_structures_by_id(self, ids):
"""
        Return all structures that are specified in ``ids``.
If a structure with the same id is in both the cache and the database,
the cached version will be preferred.
Arguments:
ids (list): A list of structure ids
"""
structures = []
ids = set(ids)
for _, record in self._active_records:
for structure in record.structures.values():
structure_id = structure.get('_id')
if structure_id in ids:
ids.remove(structure_id)
structures.append(structure)
structures.extend(self.db_connection.find_structures_by_id(list(ids)))
return structures
def find_structures_derived_from(self, ids):
"""
Return all structures that were immediately derived from a structure listed in ``ids``.
Arguments:
ids (list): A list of structure ids
"""
found_structure_ids = set()
structures = []
for _, record in self._active_records:
for structure in record.structures.values():
if structure.get('previous_version') in ids:
structures.append(structure)
if '_id' in structure:
found_structure_ids.add(structure['_id'])
structures.extend(
structure
for structure in self.db_connection.find_structures_derived_from(ids)
if structure['_id'] not in found_structure_ids
)
return structures
def find_ancestor_structures(self, original_version, block_key):
"""
Find all structures that originated from ``original_version`` that contain ``block_key``.
Any structure found in the cache will be preferred to a structure with the same id from the database.
Arguments:
original_version (str or ObjectID): The id of a structure
block_key (BlockKey): The id of the block in question
"""
found_structure_ids = set()
structures = []
for _, record in self._active_records:
for structure in record.structures.values():
if 'original_version' not in structure:
continue
if structure['original_version'] != original_version:
continue
if block_key not in structure.get('blocks', {}):
continue
if 'update_version' not in structure['blocks'][block_key].get('edit_info', {}):
continue
structures.append(structure)
found_structure_ids.add(structure['_id'])
structures.extend(
structure
for structure in self.db_connection.find_ancestor_structures(original_version, block_key)
if structure['_id'] not in found_structure_ids
)
return structures
class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
"""
A Mongodb backed ModuleStore supporting versions, inheritance,
and sharing.
"""
SCHEMA_VERSION = 1
# a list of field names to store in course index search_targets. Note, this will
# only record one value per key. If branches disagree, the last one set wins.
# It won't recompute the value on operations such as update_course_index (e.g., to revert to a prev
# version) but those functions will have an optional arg for setting these.
SEARCH_TARGET_DICT = ['wiki_slug']
DEFAULT_ROOT_LIBRARY_BLOCK_TYPE = 'library'
DEFAULT_ROOT_COURSE_BLOCK_TYPE = 'course'
def __init__(self, contentstore, doc_store_config, fs_root, render_template,
default_class=None,
error_tracker=null_error_tracker,
i18n_service=None, fs_service=None, user_service=None,
services=None, signal_handler=None, **kwargs):
"""
:param doc_store_config: must have a host, db, and collection entries. Other common entries: port, tz_aware.
"""
super(SplitMongoModuleStore, self).__init__(contentstore, **kwargs)
self.db_connection = MongoConnection(**doc_store_config)
if default_class is not None:
module_path, __, class_name = default_class.rpartition('.')
class_ = getattr(import_module(module_path), class_name)
self.default_class = class_
else:
self.default_class = None
self.fs_root = path(fs_root)
self.error_tracker = error_tracker
self.render_template = render_template
self.services = services or {}
if i18n_service is not None:
self.services["i18n"] = i18n_service
if fs_service is not None:
self.services["fs"] = fs_service
if user_service is not None:
self.services["user"] = user_service
if self.request_cache is not None:
self.services["request_cache"] = self.request_cache
self.signal_handler = signal_handler
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
self.db_connection.close_connections()
def _drop_database(self, database=True, collections=True, connections=True):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
If database is True, then this should drop the entire database.
Otherwise, if collections is True, then this should drop all of the collections used
by this modulestore.
Otherwise, the modulestore should remove all data from the collections.
If connections is True, then close the connection to the database as well.
"""
# drop the assets
super(SplitMongoModuleStore, self)._drop_database(database, collections, connections)
self.db_connection._drop_database(database, collections, connections) # pylint: disable=protected-access
def cache_items(self, system, base_block_ids, course_key, depth=0, lazy=True):
"""
        Handles caching of items once inheritance and any other one-time,
        per-course, per-fetch operations are done.
Arguments:
system: a CachingDescriptorSystem
base_block_ids: list of BlockIds to fetch
course_key: the destination course providing the context
depth: how deep below these to prefetch
lazy: whether to load definitions now or later
"""
with self.bulk_operations(course_key, emit_signals=False):
new_module_data = {}
for block_id in base_block_ids:
new_module_data = self.descendants(
system.course_entry.structure['blocks'],
block_id,
depth,
new_module_data
)
# This method supports lazy loading, where the descendent definitions aren't loaded
# until they're actually needed.
if not lazy:
# Non-lazy loading: Load all descendants by id.
descendent_definitions = self.get_definitions(
course_key,
[
block.definition
for block in six.itervalues(new_module_data)
]
)
# Turn definitions into a map.
definitions = {definition['_id']: definition
for definition in descendent_definitions}
for block in six.itervalues(new_module_data):
if block.definition in definitions:
definition = definitions[block.definition]
# convert_fields gets done later in the runtime's xblock_from_json
block.fields.update(definition.get('fields'))
block.definition_loaded = True
system.module_data.update(new_module_data)
return system.module_data
@contract(course_entry=CourseEnvelope, block_keys="list(BlockKey)", depth="int | None")
def _load_items(self, course_entry, block_keys, depth=0, **kwargs):
"""
Load & cache the given blocks from the course. May return the blocks in any order.
Load the definitions into each block if lazy is in kwargs and is False;
otherwise, do not load the definitions - they'll be loaded later when needed.
"""
lazy = kwargs.pop('lazy', True)
should_cache_items = not lazy
runtime = self._get_cache(course_entry.structure['_id'])
if runtime is None:
runtime = self.create_runtime(course_entry, lazy)
self._add_cache(course_entry.structure['_id'], runtime)
should_cache_items = True
if should_cache_items:
self.cache_items(runtime, block_keys, course_entry.course_key, depth, lazy)
with self.bulk_operations(course_entry.course_key, emit_signals=False):
return [runtime.load_item(block_key, course_entry, **kwargs) for block_key in block_keys]
def _get_cache(self, course_version_guid):
"""
Find the descriptor cache for this course if it exists
:param course_version_guid:
"""
if self.request_cache is None:
return None
return self.request_cache.data.setdefault('course_cache', {}).get(course_version_guid)
def _add_cache(self, course_version_guid, system):
"""
Save this cache for subsequent access
:param course_version_guid:
:param system:
"""
if self.request_cache is not None:
self.request_cache.data.setdefault('course_cache', {})[course_version_guid] = system
return system
def _clear_cache(self, course_version_guid=None):
"""
Should only be used by testing or something which implements transactional boundary semantics.
:param course_version_guid: if provided, clear only this entry
"""
if self.request_cache is None:
return
if course_version_guid:
try:
del self.request_cache.data.setdefault('course_cache', {})[course_version_guid]
except KeyError:
pass
else:
self.request_cache.data['course_cache'] = {}
def _lookup_course(self, course_key, head_validation=True):
"""
Decode the locator into the right series of db access. Does not
return the CourseDescriptor! It returns the actual db json from
structures.
        Semantics: if a course id and branch are given, it will get that branch. If a
        version_guid is also given, it will check whether the current head of that branch == that guid. If not,
        it raises VersionConflictError (the version now differs from what it was when you got your
        reference) unless you specify head_validation=False, in which case it will return the
        revision (if specified) by the course_key.
:param course_key: any subclass of CourseLocator
"""
if not course_key.version_guid:
head_validation = True
if head_validation and course_key.org and course_key.course and course_key.run:
if course_key.branch is None:
raise InsufficientSpecificationError(course_key)
# use the course id
index = self.get_course_index(course_key)
if index is None:
raise ItemNotFoundError(course_key)
if course_key.branch not in index['versions']:
raise ItemNotFoundError(course_key)
version_guid = index['versions'][course_key.branch]
if course_key.version_guid is not None and version_guid != course_key.version_guid:
# This may be a bit too touchy but it's hard to infer intent
raise VersionConflictError(course_key, version_guid)
elif course_key.version_guid is None:
raise InsufficientSpecificationError(course_key)
else:
# TODO should this raise an exception if branch was provided?
version_guid = course_key.version_guid
entry = self.get_structure(course_key, version_guid)
if entry is None:
raise ItemNotFoundError('Structure: {}'.format(version_guid))
# b/c more than one course can use same structure, the 'org', 'course',
# 'run', and 'branch' are not intrinsic to structure
# and the one assoc'd w/ it by another fetch may not be the one relevant to this fetch; so,
# add it in the envelope for the structure.
return CourseEnvelope(course_key.replace(version_guid=version_guid), entry)
def _get_courselike_blocks_for_branch(self, branch, **kwargs):
"""
        Internal generator for fetching lists of courselike blocks without loading them.
"""
version_guids, id_version_map = self.collect_ids_from_matching_indexes(branch, **kwargs)
if not version_guids:
return
block_type = SplitMongoModuleStore.DEFAULT_ROOT_LIBRARY_BLOCK_TYPE \
if branch == 'library' else SplitMongoModuleStore.DEFAULT_ROOT_COURSE_BLOCK_TYPE
for entry in self.find_courselike_blocks_by_id(version_guids, block_type):
for course_index in id_version_map[entry['_id']]:
yield entry, course_index
def _get_structures_for_branch(self, branch, **kwargs):
"""
Internal generator for fetching lists of courses, libraries, etc.
"""
version_guids, id_version_map = self.collect_ids_from_matching_indexes(branch, **kwargs)
if not version_guids:
return
for entry in self.find_structures_by_id(version_guids):
for course_index in id_version_map[entry['_id']]:
yield entry, course_index
def collect_ids_from_matching_indexes(self, branch, **kwargs):
"""
Find the course_indexes which have the specified branch. Extract `version_guids`
from the course_indexes.
"""
matching_indexes = self.find_matching_course_indexes(
branch,
search_targets=None,
org_target=kwargs.get('org'),
course_keys=kwargs.get('course_keys')
)
# collect ids and then query for those
version_guids = []
id_version_map = defaultdict(list)
for course_index in matching_indexes:
version_guid = course_index['versions'][branch]
version_guids.append(version_guid)
id_version_map[version_guid].append(course_index)
return version_guids, id_version_map
def _get_structures_for_branch_and_locator(self, branch, locator_factory, **kwargs):
"""
Internal generator for fetching lists of courses, libraries, etc.
:param str branch: Branch to fetch structures from
:param type locator_factory: Factory to create locator from structure info and branch
"""
result = []
for entry, structure_info in self._get_structures_for_branch(branch, **kwargs):
locator = locator_factory(structure_info, branch)
envelope = CourseEnvelope(locator, entry)
root = entry['root']
structures_list = self._load_items(envelope, [root], depth=0, **kwargs)
if not isinstance(structures_list[0], ErrorDescriptor):
result.append(structures_list[0])
return result
def _create_course_locator(self, course_info, branch):
"""
Creates course locator using course_info dict and branch
"""
return CourseLocator(
org=course_info['org'],
course=course_info['course'],
run=course_info['run'],
branch=branch,
)
def _create_library_locator(self, library_info, branch):
"""
Creates library locator using library_info dict and branch
"""
return LibraryLocator(
org=library_info['org'],
library=library_info['course'],
branch=branch,
)
@autoretry_read()
def get_courses(self, branch, **kwargs):
"""
Returns a list of course descriptors matching any given qualifiers.
qualifiers should be a dict of keywords matching the db fields or any
legal query for mongo to use against the active_versions collection.
Note, this is to find the current head of the named branch type.
To get specific versions via guid use get_course.
:param branch: the branch for which to return courses.
"""
# get the blocks for each course index (s/b the root)
return self._get_structures_for_branch_and_locator(branch, self._create_course_locator, **kwargs)
@autoretry_read()
def get_course_summaries(self, branch, **kwargs):
"""
        Returns a list of `CourseSummary` objects matching any given qualifiers.
qualifiers should be a dict of keywords matching the db fields or any
legal query for mongo to use against the active_versions collection.
Note, this is to find the current head of the named branch type.
To get specific versions via guid use get_course.
:param branch: the branch for which to return courses.
"""
def extract_course_summary(course):
"""
Extract course information from the course block for split.
"""
return {
field: course.fields[field]
for field in CourseSummary.course_info_fields
if field in course.fields
}
courses_summaries = []
for entry, structure_info in self._get_courselike_blocks_for_branch(branch, **kwargs):
course_locator = self._create_course_locator(structure_info, branch=None)
course_block = [
block_data
for block_key, block_data in entry['blocks'].items()
if block_key.type == "course"
]
if not course_block:
raise ItemNotFoundError
if len(course_block) > 1:
raise MultipleCourseBlocksFound(
"Expected 1 course block to be found in the course, but found {0}".format(len(course_block))
)
course_summary = extract_course_summary(course_block[0])
courses_summaries.append(
CourseSummary(course_locator, **course_summary)
)
return courses_summaries
@autoretry_read()
def get_library_summaries(self, **kwargs):
"""
Returns a list of `LibrarySummary` objects.
kwargs can be valid db fields to match against active_versions
collection e.g org='example_org'.
"""
branch = 'library'
libraries_summaries = []
for entry, structure_info in self._get_courselike_blocks_for_branch(branch, **kwargs):
library_locator = self._create_library_locator(structure_info, branch=None)
library_block = [
block_data
for block_key, block_data in entry['blocks'].items()
if block_key.type == "library"
]
if not library_block:
raise ItemNotFoundError
if len(library_block) > 1:
raise MultipleLibraryBlocksFound(
"Expected 1 library block, but found {0}".format(len(library_block))
)
library_block_fields = library_block[0].fields
display_name = ''
if 'display_name' in library_block_fields:
display_name = library_block_fields['display_name']
libraries_summaries.append(
LibrarySummary(library_locator, display_name)
)
return libraries_summaries
def get_libraries(self, branch="library", **kwargs):
"""
Returns a list of "library" root blocks matching any given qualifiers.
TODO: better way of identifying library index entry vs. course index entry.
"""
return self._get_structures_for_branch_and_locator(branch, self._create_library_locator, **kwargs)
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
return CourseLocator(org, course, run)
def make_course_usage_key(self, course_key):
"""
Return a valid :class:`~opaque_keys.edx.keys.UsageKey` for this modulestore
that matches the supplied course_key.
"""
locator_cls = CCXBlockUsageLocator if isinstance(course_key, CCXLocator) else BlockUsageLocator
return locator_cls(course_key, 'course', 'course')
def _get_structure(self, structure_id, depth, head_validation=True, **kwargs):
"""
Gets Course or Library by locator
"""
structure_entry = self._lookup_course(structure_id, head_validation=head_validation)
root = structure_entry.structure['root']
result = self._load_items(structure_entry, [root], depth, **kwargs)
return result[0]
def get_course(self, course_id, depth=0, **kwargs):
"""
Gets the course descriptor for the course identified by the locator
"""
if not isinstance(course_id, CourseLocator) or course_id.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_id)
return self._get_structure(course_id, depth, **kwargs)
def get_library(self, library_id, depth=0, head_validation=True, **kwargs):
"""
Gets the 'library' root block for the library identified by the locator
"""
if not isinstance(library_id, LibraryLocator):
            # The supplied key is not a LibraryLocator, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(library_id)
return self._get_structure(library_id, depth, head_validation=head_validation, **kwargs)
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
        Does this course exist in this modulestore? This method does not verify that the branch &/or
        version in the course_id exists. Use get_course_index_info to check that.
Returns the course_id of the course if it was found, else None
Note: we return the course_id instead of a boolean here since the found course may have
a different id than the given course_id when ignore_case is True.
"""
if not isinstance(course_id, CourseLocator) or course_id.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
return False
course_index = self.get_course_index(course_id, ignore_case)
return CourseLocator(course_index['org'], course_index['course'], course_index['run'], course_id.branch) if course_index else None
def has_library(self, library_id, ignore_case=False, **kwargs):
"""
        Does this library exist in this modulestore? This method does not verify that the branch &/or
        version in the library_id exists.
        Returns the library_id of the library if it was found, else None.
"""
if not isinstance(library_id, LibraryLocator):
return None
index = self.get_course_index(library_id, ignore_case)
if index:
return LibraryLocator(index['org'], index['course'], library_id.branch)
return None
def has_item(self, usage_key):
"""
        Returns True if usage_key exists in its course. Returns False if
        the course or the block w/in the course does not exist for the given version.
        raises InsufficientSpecificationError if the usage_key does not identify a block
"""
if not isinstance(usage_key, BlockUsageLocator) or usage_key.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
return False
if usage_key.block_id is None:
raise InsufficientSpecificationError(usage_key)
try:
course_structure = self._lookup_course(usage_key.course_key).structure
except ItemNotFoundError:
# this error only occurs if the course does not exist
return False
return self._get_block_from_structure(course_structure, BlockKey.from_usage_key(usage_key)) is not None
@contract(returns='XBlock')
def get_item(self, usage_key, depth=0, **kwargs):
"""
depth (int): An argument that some module stores may use to prefetch
descendants of the queried modules for more efficient results later
in the request. The depth is counted in the number of
calls to get_children() to cache. None indicates to cache all
descendants.
raises InsufficientSpecificationError or ItemNotFoundError
"""
if not isinstance(usage_key, BlockUsageLocator) or usage_key.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(usage_key)
with self.bulk_operations(usage_key.course_key):
course = self._lookup_course(usage_key.course_key)
items = self._load_items(course, [BlockKey.from_usage_key(usage_key)], depth, **kwargs)
if len(items) == 0:
raise ItemNotFoundError(usage_key)
elif len(items) > 1:
log.debug("Found more than one item for '{}'".format(usage_key))
return items[0]
def get_items(self, course_locator, settings=None, content=None, qualifiers=None, include_orphans=True, **kwargs):
"""
Returns:
list of XModuleDescriptor instances for the matching items within the course with
the given course_locator
NOTE: don't use this to look for courses as the course_locator is required. Use get_courses.
Args:
course_locator (CourseLocator): the course identifier
settings (dict): fields to look for which have settings scope. Follows same syntax
and rules as qualifiers below
content (dict): fields to look for which have content scope. Follows same syntax and
rules as qualifiers below.
qualifiers (dict): what to look for within the course.
                Common qualifiers are ``category`` or any field name. If the target field is a list,
                then it searches for the given value in the list, not for list equivalence.
                For substring matching, pass a regex object.
For split,
you can search by ``edited_by``, ``edited_on`` providing a function testing limits.
include_orphans (boolean): Returns all items in a course, including orphans if present.
True - This would return all items irrespective of course in tree checking. It may fetch orphans
if present in the course.
False - if we want only those items which are in the course tree. This would ensure no orphans are
fetched.
"""
if not isinstance(course_locator, CourseKey) or course_locator.deprecated:
# The supplied courselike key is of the wrong type, so it can't possibly be stored in this modulestore.
return []
course = self._lookup_course(course_locator)
items = []
qualifiers = qualifiers.copy() if qualifiers else {} # copy the qualifiers (destructively manipulated here)
def _block_matches_all(block_data):
"""
Check that the block matches all the criteria
"""
# do the checks which don't require loading any additional data
if ( # pylint: disable=bad-continuation
self._block_matches(block_data, qualifiers) and
self._block_matches(block_data.fields, settings)
):
if content:
definition_block = self.get_definition(course_locator, block_data.definition)
return self._block_matches(definition_block['fields'], content)
else:
return True
if settings is None:
settings = {}
if 'name' in qualifiers:
            # odd case where we don't search, just confirm
block_name = qualifiers.pop('name')
block_ids = []
for block_id, block in six.iteritems(course.structure['blocks']):
# Don't do an in comparison blindly; first check to make sure
# that the name qualifier we're looking at isn't a plain string;
# if it is a string, then it should match exactly. If it's other
# than a string, we check whether it contains the block ID; this
# is so a list or other iterable can be passed with multiple
# valid qualifiers.
if isinstance(block_name, six.string_types):
name_matches = block_id.id == block_name
else:
name_matches = block_id.id in block_name
if name_matches and _block_matches_all(block):
block_ids.append(block_id)
return self._load_items(course, block_ids, **kwargs)
if 'category' in qualifiers:
qualifiers['block_type'] = qualifiers.pop('category')
# don't expect caller to know that children are in fields
if 'children' in qualifiers:
settings['children'] = qualifiers.pop('children')
# No need of these caches unless include_orphans is set to False
path_cache = None
parents_cache = None
if not include_orphans:
path_cache = {}
parents_cache = self.build_block_key_to_parents_mapping(course.structure)
for block_id, value in six.iteritems(course.structure['blocks']):
if _block_matches_all(value):
if not include_orphans:
if ( # pylint: disable=bad-continuation
block_id.type in DETACHED_XBLOCK_TYPES or
self.has_path_to_root(block_id, course, path_cache, parents_cache)
):
items.append(block_id)
else:
items.append(block_id)
if len(items) > 0:
return self._load_items(course, items, depth=0, **kwargs)
else:
return []
def build_block_key_to_parents_mapping(self, structure):
"""
Given a structure, builds block_key to parents mapping for all block keys in structure
and returns it
:param structure: db json of course structure
:return dict: a dictionary containing mapping of block_keys against their parents.
"""
children_to_parents = defaultdict(list)
for parent_key, value in six.iteritems(structure['blocks']):
for child_key in value.fields.get('children', []):
children_to_parents[child_key].append(parent_key)
return children_to_parents
def has_path_to_root(self, block_key, course, path_cache=None, parents_cache=None):
"""
Check recursively if an xblock has a path to the course root
:param block_key: BlockKey of the component whose path is to be checked
:param course: actual db json of course from structures
:param path_cache: a dictionary that records which modules have a path to the root so that we don't have to
double count modules if we're computing this for a list of modules in a course.
:param parents_cache: a dictionary containing mapping of block_key to list of its parents. Optionally, this
should be built for course structure to make this method faster.
:return Bool: whether or not component has path to the root
"""
if path_cache and block_key in path_cache:
return path_cache[block_key]
if parents_cache is None:
xblock_parents = self._get_parents_from_structure(block_key, course.structure)
else:
xblock_parents = parents_cache[block_key]
if len(xblock_parents) == 0 and block_key.type in ["course", "library"]:
# Found, xblock has the path to the root
if path_cache is not None:
path_cache[block_key] = True
return True
has_path = any(
self.has_path_to_root(xblock_parent, course, path_cache, parents_cache)
for xblock_parent in xblock_parents
)
if path_cache is not None:
path_cache[block_key] = has_path
return has_path
def get_parent_location(self, locator, **kwargs):
"""
Return the location (Locators w/ block_ids) for the parent of this location in this
course. Could use get_items(location, {'children': block_id}) but this is slightly faster.
NOTE: the locator must contain the block_id, and this code does not actually ensure block_id exists
:param locator: BlockUsageLocator restricting search scope
"""
if not isinstance(locator, BlockUsageLocator) or locator.deprecated:
# The supplied locator is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(locator)
course = self._lookup_course(locator.course_key)
all_parent_ids = self._get_parents_from_structure(BlockKey.from_usage_key(locator), course.structure)
# Check and verify the found parent_ids are not orphans; Remove parent which has no valid path
# to the course root
parent_ids = [
valid_parent
for valid_parent in all_parent_ids
if self.has_path_to_root(valid_parent, course)
]
if len(parent_ids) == 0:
return None
# find alphabetically least
parent_ids.sort(key=lambda parent: (parent.type, parent.id))
return BlockUsageLocator.make_relative(
locator,
block_type=parent_ids[0].type,
block_id=parent_ids[0].id,
)
def get_orphans(self, course_key, **kwargs):
"""
Return an array of all of the orphans in the course.
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
detached_categories = [name for name, __ in XBlock.load_tagged_classes("detached")]
course = self._lookup_course(course_key)
items = set(course.structure['blocks'].keys())
items.remove(course.structure['root'])
blocks = course.structure['blocks']
for block_id, block_data in six.iteritems(blocks):
items.difference_update(BlockKey(*child) for child in block_data.fields.get('children', []))
if block_data.block_type in detached_categories:
items.discard(block_id)
return [
course_key.make_usage_key(block_type=block_id.type, block_id=block_id.id)
for block_id in items
]
def get_course_index_info(self, course_key):
"""
The index records the initial creation of the indexed course and tracks the current version
heads. This function is primarily for test verification but may serve some
more general purpose.
:param course_key: must have a org, course, and run set
:return {'org': string,
versions: {'draft': the head draft version id,
'published': the head published version id if any,
},
'edited_by': who created the course originally (named edited for consistency),
'edited_on': when the course was originally created
}
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
if not (course_key.course and course_key.run and course_key.org):
return None
index = self.get_course_index(course_key)
return index
# TODO figure out a way to make this info accessible from the course descriptor
def get_course_history_info(self, course_key):
"""
        Because the xblock machinery doesn't provide a means to separate the course structure's meta information from
        the course xblock's, this method gets that info for the structure as a whole.
:param course_key:
:return {'original_version': the version guid of the original version of this course,
'previous_version': the version guid of the previous version,
'edited_by': who made the last change,
'edited_on': when the change was made
}
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
course = self._lookup_course(course_key).structure
return {
'original_version': course['original_version'],
'previous_version': course['previous_version'],
'edited_by': course['edited_by'],
'edited_on': course['edited_on']
}
def get_definition_history_info(self, definition_locator, course_context=None):
"""
        Because the xblock machinery doesn't provide a means to separate the definition's meta information from
        the usage xblock's, this method gets that info for the definition.
:return {'original_version': the version guid of the original version of this course,
'previous_version': the version guid of the previous version,
'edited_by': who made the last change,
'edited_on': when the change was made
}
"""
if not isinstance(definition_locator, DefinitionLocator) or definition_locator.deprecated:
# The supplied locator is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(definition_locator)
definition = self.db_connection.get_definition(definition_locator.definition_id, course_context)
if definition is None:
return None
return definition['edit_info']
def get_course_successors(self, course_locator, version_history_depth=1):
"""
Find the version_history_depth next versions of this course. Return as a VersionTree
Mostly makes sense when course_locator uses a version_guid, but because it finds all relevant
next versions, these do include those created for other courses.
:param course_locator:
"""
if not isinstance(course_locator, CourseLocator) or course_locator.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_locator)
if version_history_depth < 1:
return None
if course_locator.version_guid is None:
course = self._lookup_course(course_locator)
version_guid = course.structure['_id']
course_locator = course_locator.for_version(version_guid)
else:
version_guid = course_locator.version_guid
# TODO if depth is significant, it may make sense to get all that have the same original_version
# and reconstruct the subtree from version_guid
next_entries = self.find_structures_derived_from([version_guid])
        # must only scan the cursor once
next_versions = [struct for struct in next_entries]
result = {version_guid: [CourseLocator(version_guid=struct['_id']) for struct in next_versions]}
depth = 1
while depth < version_history_depth and len(next_versions) > 0:
depth += 1
next_entries = self.find_structures_derived_from([struct['_id'] for struct in next_versions])
next_versions = [struct for struct in next_entries]
for course_structure in next_versions:
result.setdefault(course_structure['previous_version'], []).append(
                    CourseLocator(version_guid=course_structure['_id']))
return VersionTree(course_locator, result)
def get_block_generations(self, block_locator):
"""
Find the history of this block. Return as a VersionTree of each place the block changed (except
deletion).
The block's history tracks its explicit changes but not the changes in its children starting
from when the block was created.
"""
# course_agnostic means we don't care if the head and version don't align, trust the version
course_struct = self._lookup_course(block_locator.course_key.course_agnostic()).structure
block_key = BlockKey.from_usage_key(block_locator)
all_versions_with_block = self.find_ancestor_structures(
original_version=course_struct['original_version'],
block_key=block_key
)
# find (all) root versions and build map {previous: {successors}..}
possible_roots = []
result = {}
for version in all_versions_with_block:
block_payload = self._get_block_from_structure(version, block_key)
if version['_id'] == block_payload.edit_info.update_version:
if block_payload.edit_info.previous_version is None:
# this was when this block was created
possible_roots.append(block_payload.edit_info.update_version)
else: # map previous to {update..}
result.setdefault(block_payload.edit_info.previous_version, set()).add(
block_payload.edit_info.update_version)
# more than one possible_root means usage was added and deleted > 1x.
if len(possible_roots) > 1:
# find the history segment including block_locator's version
element_to_find = self._get_block_from_structure(course_struct, block_key).edit_info.update_version
if element_to_find in possible_roots:
possible_roots = [element_to_find]
for possibility in possible_roots:
if self._find_local_root(element_to_find, possibility, result):
possible_roots = [possibility]
break
elif len(possible_roots) == 0:
return None
# convert the results value sets to locators
for k, versions in six.iteritems(result):
result[k] = [
block_locator.for_version(version)
for version in versions
]
return VersionTree(
block_locator.for_version(possible_roots[0]),
result
)
def get_definition_successors(self, definition_locator, version_history_depth=1):
"""
Find the version_history_depth next versions of this definition. Return as a VersionTree
"""
# TODO implement
pass
def get_block_original_usage(self, usage_key):
"""
If a block was inherited into another structure using copy_from_template,
this will return the original block usage locator and version from
which the copy was inherited.
Returns usage_key, version if the data is available, otherwise returns (None, None)
"""
blocks = self._lookup_course(usage_key.course_key).structure['blocks']
block = blocks.get(BlockKey.from_usage_key(usage_key))
if block and block.edit_info.original_usage is not None:
usage_key = BlockUsageLocator.from_string(block.edit_info.original_usage)
return usage_key, block.edit_info.original_usage_version
return None, None
def create_definition_from_data(self, course_key, new_def_data, category, user_id):
"""
Pull the definition fields out of descriptor and save to the db as a new definition
w/o a predecessor and return the new id.
:param user_id: request.user object
"""
new_def_data = self._serialize_fields(category, new_def_data)
new_id = ObjectId()
document = {
'_id': new_id,
"block_type": category,
"fields": new_def_data,
"edit_info": {
"edited_by": user_id,
"edited_on": datetime.datetime.now(UTC),
"previous_version": None,
"original_version": new_id,
},
'schema_version': self.SCHEMA_VERSION,
}
self.update_definition(course_key, document)
definition_locator = DefinitionLocator(category, new_id)
return definition_locator
def update_definition_from_data(self, course_key, definition_locator, new_def_data, user_id):
"""
See if new_def_data differs from the persisted version. If so, update
the persisted version and return the new id.
:param user_id: request.user
"""
def needs_saved():
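            # True if any field was added or changed relative to the persisted definition,
            # or if a previously persisted field is now absent from new_def_data.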
for key, value in six.iteritems(new_def_data):
if key not in old_definition['fields'] or value != old_definition['fields'][key]:
return True
for key, value in six.iteritems(old_definition.get('fields', {})):
if key not in new_def_data:
return True
# if this looks in cache rather than fresh fetches, then it will probably not detect
# actual change b/c the descriptor and cache probably point to the same objects
old_definition = self.get_definition(course_key, definition_locator.definition_id)
if old_definition is None:
raise ItemNotFoundError(definition_locator)
new_def_data = self._serialize_fields(old_definition['block_type'], new_def_data)
if needs_saved():
definition_locator = self._update_definition_from_data(course_key, old_definition, new_def_data, user_id)
return definition_locator, True
else:
return definition_locator, False
def _update_definition_from_data(self, course_key, old_definition, new_def_data, user_id):
"""
Update the persisted version of the given definition and return the
locator of the new definition. Does not check if data differs from the
previous version.
"""
new_definition = copy.deepcopy(old_definition)
new_definition['_id'] = ObjectId()
new_definition['fields'] = new_def_data
new_definition['edit_info']['edited_by'] = user_id
new_definition['edit_info']['edited_on'] = datetime.datetime.now(UTC)
# previous version id
new_definition['edit_info']['previous_version'] = old_definition['_id']
new_definition['schema_version'] = self.SCHEMA_VERSION
self.update_definition(course_key, new_definition)
return DefinitionLocator(new_definition['block_type'], new_definition['_id'])
def _generate_block_key(self, course_blocks, category):
"""
Generate a somewhat readable block id unique w/in this course using the category
:param course_blocks: the current list of blocks.
:param category:
"""
# NOTE: a potential bug is that a block is deleted and another created which gets the old
# block's id. a possible fix is to cache the last serial in a dict in the structure
# {category: last_serial...}
# A potential confusion is if the name incorporates the parent's name, then if the child
# moves, its id won't change and will be confusing
serial = 1
while True:
potential_key = BlockKey(category, "{}{}".format(category, serial))
if potential_key not in course_blocks:
return potential_key
serial += 1
@contract(returns='XBlock')
def create_item(self, user_id, course_key, block_type, block_id=None, definition_locator=None, fields=None,
asides=None, force=False, **kwargs):
"""
Add a descriptor to persistence as an element
of the course. Return the resulting post saved version with populated locators.
:param course_key: If it has a version_guid and a course org + course + run + branch, this
method ensures that the version is the head of the given course branch before making the change.
raises InsufficientSpecificationError if there is no course locator.
raises VersionConflictError if the version_guid of the course_or_parent_locator is not the head
        of its course unless force is true.
:param force: fork the structure and don't update the course draftVersion if the above
:param continue_revision: for multistep transactions, continue revising the given version rather than creating
a new version. Setting force to True conflicts with setting this to True and will cause a VersionConflictError
:param definition_locator: should either be None to indicate this is a brand new definition or
a pointer to the existing definition to which this block should point or from which this was derived
or a LocalId to indicate that it's new.
If fields does not contain any Scope.content, then definition_locator must have a value meaning that this
block points
to the existing definition. If fields contains Scope.content and definition_locator is not None, then
the Scope.content fields are assumed to be a new payload for definition_locator.
:param block_id: if provided, must not already exist in the structure. Provides the block id for the
new item in this structure. Otherwise, one is computed using the category appended w/ a few digits.
This method creates a new version of the course structure unless the course has a bulk_write operation
active.
        It creates and inserts the new block and makes the block point
        to its definition, which may be a brand new definition, a new version of an existing definition, or an existing one.
Rules for course locator:
* If the course locator specifies a org and course and run and either it doesn't
specify version_guid or the one it specifies == the current head of the branch,
it progresses the course to point
to the new head and sets the active version to point to the new head
* If the locator has a org and course and run but its version_guid != current head, it raises VersionConflictError.
NOTE: using a version_guid will end up creating a new version of the course. Your new item won't be in
the course id'd by version_guid but instead in one w/ a new version_guid. Ensure in this case that you get
the new version_guid from the locator in the returned object!
"""
with self.bulk_operations(course_key):
# split handles all the fields in one dict not separated by scope
fields = fields or {}
fields.update(kwargs.pop('metadata', {}) or {})
definition_data = kwargs.pop('definition_data', {})
if definition_data:
if not isinstance(definition_data, dict):
definition_data = {'data': definition_data} # backward compatibility to mongo's hack
fields.update(definition_data)
# find course_index entry if applicable and structures entry
index_entry = self._get_index_if_valid(course_key, force)
structure = self._lookup_course(course_key).structure
partitioned_fields = self.partition_fields_by_scope(block_type, fields)
new_def_data = partitioned_fields.get(Scope.content, {})
# persist the definition if persisted != passed
if definition_locator is None or isinstance(definition_locator.definition_id, LocalId):
definition_locator = self.create_definition_from_data(course_key, new_def_data, block_type, user_id)
elif new_def_data:
definition_locator, _ = self.update_definition_from_data(course_key, definition_locator, new_def_data, user_id)
# copy the structure and modify the new one
new_structure = self.version_structure(course_key, structure, user_id)
new_id = new_structure['_id']
# generate usage id
if block_id is not None:
block_key = BlockKey(block_type, block_id)
if block_key in new_structure['blocks']:
raise DuplicateItemError(block_id, self, 'structures')
else:
block_key = self._generate_block_key(new_structure['blocks'], block_type)
block_fields = partitioned_fields.get(Scope.settings, {})
if Scope.children in partitioned_fields:
block_fields.update(partitioned_fields[Scope.children])
self._update_block_in_structure(new_structure, block_key, self._new_block(
user_id,
block_type,
block_fields,
definition_locator.definition_id,
new_id,
asides=asides
))
self.update_structure(course_key, new_structure)
# update the index entry if appropriate
if index_entry is not None:
# see if any search targets changed
if fields is not None:
self._update_search_targets(index_entry, fields)
self._update_head(course_key, index_entry, course_key.branch, new_id)
item_loc = BlockUsageLocator(
course_key.version_agnostic(),
block_type=block_type,
block_id=block_key.id,
)
else:
item_loc = BlockUsageLocator(
CourseLocator(version_guid=new_id),
block_type=block_type,
block_id=block_key.id,
)
if isinstance(course_key, LibraryLocator):
self._flag_library_updated_event(course_key)
# reconstruct the new_item from the cache
return self.get_item(item_loc)
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, asides=None, **kwargs):
"""
        Creates and saves a new xblock as a child of the specified block.
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
block that this item should be parented under
            block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
asides (dict): A dictionary specifying initial values for some or all aside fields
in the newly created block
"""
with self.bulk_operations(parent_usage_key.course_key):
xblock = self.create_item(
user_id, parent_usage_key.course_key, block_type, block_id=block_id, fields=fields, asides=asides,
**kwargs)
# skip attach to parent if xblock has 'detached' tag
if 'detached' in xblock._class_tags: # pylint: disable=protected-access
return xblock
# don't version the structure as create_item handled that already.
new_structure = self._lookup_course(xblock.location.course_key).structure
# add new block as child and update parent's version
block_id = BlockKey.from_usage_key(parent_usage_key)
if block_id not in new_structure['blocks']:
raise ItemNotFoundError(parent_usage_key)
parent = new_structure['blocks'][block_id]
# Originally added to support entrance exams (settings.FEATURES.get('ENTRANCE_EXAMS'))
if kwargs.get('position') is None:
parent.fields.setdefault('children', []).append(BlockKey.from_usage_key(xblock.location))
else:
parent.fields.setdefault('children', []).insert(
kwargs.get('position'),
BlockKey.from_usage_key(xblock.location)
)
if parent.edit_info.update_version != new_structure['_id']:
# if the parent hadn't been previously changed in this bulk transaction, indicate that it's
# part of the bulk transaction
self.version_block(parent, user_id, new_structure['_id'])
self.decache_block(parent_usage_key.course_key, new_structure['_id'], block_id)
# db update
self.update_structure(parent_usage_key.course_key, new_structure)
# don't need to update the index b/c create_item did it for this version
return xblock
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
"""
See :meth: `.ModuleStoreWrite.clone_course` for documentation.
In split, other than copying the assets, this is cheap as it merely creates a new version of the
existing course.
"""
source_index = self.get_course_index_info(source_course_id)
if source_index is None:
raise ItemNotFoundError("Cannot find a course at {0}. Aborting".format(source_course_id))
with self.bulk_operations(dest_course_id):
new_course = self.create_course(
dest_course_id.org, dest_course_id.course, dest_course_id.run,
user_id,
fields=fields,
versions_dict=source_index['versions'],
search_targets=source_index['search_targets'],
skip_auto_publish=True,
**kwargs
)
# don't copy assets until we create the course in case something's awry
super(SplitMongoModuleStore, self).clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
return new_course
DEFAULT_ROOT_COURSE_BLOCK_ID = 'course'
DEFAULT_ROOT_LIBRARY_BLOCK_ID = 'library'
def create_course(
self, org, course, run, user_id, master_branch=None, fields=None,
versions_dict=None, search_targets=None, root_category='course',
root_block_id=None, **kwargs
):
"""
Create a new entry in the active courses index which points to an existing or new structure. Returns
the course root of the resulting entry (the location has the course id)
Arguments:
org (str): the organization that owns the course
course (str): the course number of the course
run (str): the particular run of the course (e.g. 2013_T1)
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
course + run: If there are duplicates, this method will raise DuplicateCourseError
fields: if scope.settings fields provided, will set the fields of the root course object in the
new course. If both
settings fields and a starting version are provided (via versions_dict), it will generate a successor version
to the given version,
and update the settings fields with any provided values (via update not setting).
fields (content): if scope.content fields provided, will update the fields of the new course
xblock definition to this. Like settings fields,
if provided, this will cause a new version of any given version as well as a new version of the
definition (which will point to the existing one if given a version). If not provided and given
a version_dict, it will reuse the same definition as that version's course
(obvious since it's reusing the
course). If not provided and no version_dict is given, it will be empty and get the field defaults
when
loaded.
master_branch: the tag (key) for the version name in the dict which is the DRAFT version. Not the actual
version guid, but what to call it.
search_targets: a dict of search key and value. For example, wiki_slug. Add any fields whose edits
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict
versions_dict: the starting version ids where the keys are the tags such as DRAFT and PUBLISHED
and the values are structure guids. If provided, the new course will reuse this version (unless you also
provide any fields overrides, see above). if not provided, will create a mostly empty course
structure with just a category course root xblock.
"""
# either need to assert this or have a default
assert master_branch is not None
# check course and run's uniqueness
locator = CourseLocator(org=org, course=course, run=run, branch=master_branch)
return self._create_courselike(
locator, user_id, master_branch, fields, versions_dict,
search_targets, root_category, root_block_id, **kwargs
)
def _create_courselike(
self, locator, user_id, master_branch, fields=None,
versions_dict=None, search_targets=None, root_category='course',
root_block_id=None, **kwargs
):
"""
Internal code for creating a course or library
"""
index = self.get_course_index(locator, ignore_case=True)
if index is not None:
raise DuplicateCourseError(locator, index)
partitioned_fields = self.partition_fields_by_scope(root_category, fields)
block_fields = partitioned_fields[Scope.settings]
if Scope.children in partitioned_fields:
block_fields.update(partitioned_fields[Scope.children])
definition_fields = self._serialize_fields(root_category, partitioned_fields.get(Scope.content, {}))
# build from inside out: definition, structure, index entry
# if building a wholly new structure
if versions_dict is None or master_branch not in versions_dict:
# create new definition and structure
definition_id = self.create_definition_from_data(locator, definition_fields, root_category, user_id).definition_id
draft_structure = self._new_structure(
user_id,
BlockKey(
root_category,
root_block_id or SplitMongoModuleStore.DEFAULT_ROOT_COURSE_BLOCK_ID,
),
block_fields,
definition_id
)
new_id = draft_structure['_id']
if versions_dict is None:
versions_dict = {master_branch: new_id}
else:
versions_dict[master_branch] = new_id
elif block_fields or definition_fields: # pointing to existing course w/ some overrides
# just get the draft_version structure
draft_version = CourseLocator(version_guid=versions_dict[master_branch])
draft_structure = self._lookup_course(draft_version).structure
draft_structure = self.version_structure(locator, draft_structure, user_id)
new_id = draft_structure['_id']
root_block = draft_structure['blocks'][draft_structure['root']]
if block_fields is not None:
root_block.fields.update(self._serialize_fields(root_category, block_fields))
if definition_fields is not None:
old_def = self.get_definition(locator, root_block.definition)
new_fields = old_def['fields']
new_fields.update(definition_fields)
definition_id = self._update_definition_from_data(locator, old_def, new_fields, user_id).definition_id
root_block.definition = definition_id
root_block.edit_info.edited_on = datetime.datetime.now(UTC)
root_block.edit_info.edited_by = user_id
root_block.edit_info.previous_version = root_block.edit_info.update_version
root_block.edit_info.update_version = new_id
versions_dict[master_branch] = new_id
else: # Pointing to an existing course structure
new_id = versions_dict[master_branch]
draft_version = CourseLocator(version_guid=new_id)
draft_structure = self._lookup_course(draft_version).structure
locator = locator.replace(version_guid=new_id)
with self.bulk_operations(locator):
self.update_structure(locator, draft_structure)
index_entry = {
'_id': ObjectId(),
'org': locator.org,
'course': locator.course,
'run': locator.run,
'edited_by': user_id,
'edited_on': datetime.datetime.now(UTC),
'versions': versions_dict,
'schema_version': self.SCHEMA_VERSION,
'search_targets': search_targets or {},
}
if fields is not None:
self._update_search_targets(index_entry, fields)
self.insert_course_index(locator, index_entry)
# expensive hack to persist default field values set in __init__ method (e.g., wiki_slug)
if isinstance(locator, LibraryLocator):
course = self.get_library(locator, **kwargs)
else:
course = self.get_course(locator, **kwargs)
return self.update_item(course, user_id, **kwargs)
def create_library(self, org, library, user_id, fields, **kwargs):
"""
Create a new library. Arguments are similar to create_course().
"""
kwargs["fields"] = fields
kwargs["master_branch"] = kwargs.get("master_branch", ModuleStoreEnum.BranchName.library)
kwargs["root_category"] = kwargs.get("root_category", "library")
kwargs["root_block_id"] = kwargs.get("root_block_id", "library")
locator = LibraryLocator(org=org, library=library, branch=kwargs["master_branch"])
return self._create_courselike(locator, user_id, **kwargs)
def update_item(self, descriptor, user_id, allow_not_found=False, force=False, **kwargs):
"""
        Save the descriptor's fields. It doesn't descend the course dag to save the children.
Return the new descriptor (updated location).
raises ItemNotFoundError if the location does not exist.
        Creates a new course version. If the descriptor's location has an org, course, and run, it moves the course head
pointer. If the version_guid of the descriptor points to a non-head version and there's been an intervening
change to this item, it raises a VersionConflictError unless force is True. In the force case, it forks
the course but leaves the head pointer where it is (this change will not be in the course head).
        The implementation tries to detect which changes, if any, actually need to be saved and thus won't version
        the definition, structure, or course if they didn't change.
"""
partitioned_fields = self.partition_xblock_fields_by_scope(descriptor)
return self._update_item_from_fields(
user_id, descriptor.location.course_key, BlockKey.from_usage_key(descriptor.location),
partitioned_fields, descriptor.definition_locator, allow_not_found, force, **kwargs
) or descriptor
def _update_item_from_fields(self, user_id, course_key, block_key, partitioned_fields, # pylint: disable=too-many-statements
definition_locator, allow_not_found, force, asides=None, **kwargs):
"""
Broke out guts of update_item for short-circuited internal use only
"""
with self.bulk_operations(course_key):
if allow_not_found and isinstance(block_key.id, (LocalId, type(None))):
fields = {}
for subfields in six.itervalues(partitioned_fields):
fields.update(subfields)
return self.create_item(
user_id, course_key, block_key.type, fields=fields, asides=asides, force=force
)
original_structure = self._lookup_course(course_key).structure
index_entry = self._get_index_if_valid(course_key, force)
original_entry = self._get_block_from_structure(original_structure, block_key)
if original_entry is None:
if allow_not_found:
fields = {}
for subfields in six.itervalues(partitioned_fields):
fields.update(subfields)
return self.create_item(user_id, course_key, block_key.type, block_id=block_key.id, fields=fields,
asides=asides, force=force)
else:
raise ItemNotFoundError(course_key.make_usage_key(block_key.type, block_key.id))
is_updated = False
definition_fields = partitioned_fields[Scope.content]
if definition_locator is None:
definition_locator = DefinitionLocator(original_entry.block_type, original_entry.definition)
if definition_fields:
definition_locator, is_updated = self.update_definition_from_data(
course_key, definition_locator, definition_fields, user_id
)
# check metadata
settings = partitioned_fields[Scope.settings]
settings = self._serialize_fields(block_key.type, settings)
if not is_updated:
is_updated = self._compare_settings(settings, original_entry.fields)
# check children
if partitioned_fields.get(Scope.children, {}): # purposely not 'is not None'
serialized_children = [BlockKey.from_usage_key(child) for child in partitioned_fields[Scope.children]['children']]
is_updated = is_updated or original_entry.fields.get('children', []) != serialized_children
if is_updated:
settings['children'] = serialized_children
asides_data_to_update = None
if asides:
asides_data_to_update, asides_updated = self._get_asides_to_update_from_structure(original_structure,
block_key, asides)
else:
asides_updated = False
# if updated, rev the structure
if is_updated or asides_updated:
new_structure = self.version_structure(course_key, original_structure, user_id)
block_data = self._get_block_from_structure(new_structure, block_key)
block_data.definition = definition_locator.definition_id
block_data.fields = settings
if asides_updated:
block_data.asides = asides_data_to_update
new_id = new_structure['_id']
# source_version records which revision a block was copied from. In this method, we're updating
# the block, so it's no longer a direct copy, and we can remove the source_version reference.
block_data.edit_info.source_version = None
self.version_block(block_data, user_id, new_id)
self.update_structure(course_key, new_structure)
# update the index entry if appropriate
if index_entry is not None:
self._update_search_targets(index_entry, definition_fields)
self._update_search_targets(index_entry, settings)
if isinstance(course_key, LibraryLocator):
course_key = LibraryLocator(
org=index_entry['org'],
library=index_entry['course'],
branch=course_key.branch,
version_guid=new_id
)
else:
course_key = CourseLocator(
org=index_entry['org'],
course=index_entry['course'],
run=index_entry['run'],
branch=course_key.branch,
version_guid=new_id
)
self._update_head(course_key, index_entry, course_key.branch, new_id)
elif isinstance(course_key, LibraryLocator):
course_key = LibraryLocator(version_guid=new_id)
else:
course_key = CourseLocator(version_guid=new_id)
if isinstance(course_key, LibraryLocator):
self._flag_library_updated_event(course_key)
# fetch and return the new item--fetching is unnecessary but a good qc step
new_locator = course_key.make_usage_key(block_key.type, block_key.id)
return self.get_item(new_locator, **kwargs)
else:
return None
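    # Illustrative shape of the `partitioned_fields` argument consumed above
    # (the field names are examples only, not required keys):
    #   {
    #       Scope.content:  {'data': '<html>...</html>'},
    #       Scope.settings: {'display_name': 'Week 1'},
    #       Scope.children: {'children': [<UsageKey>, ...]},
    #   }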
def create_xblock(
self, runtime, course_key, block_type, block_id=None, fields=None,
definition_id=None, parent_xblock=None, **kwargs
):
"""
This method instantiates the correct subclass of XModuleDescriptor based
on the contents of json_data. It does not persist it and can create one which
has no usage id.
parent_xblock is used to compute inherited metadata as well as to append the new xblock.
json_data:
- 'block_type': the xmodule block_type
        - 'fields': a dict of locally set fields (not inherited) in json format, not pythonic typed format!
- 'definition': the object id of the existing definition
"""
assert runtime is not None
xblock_class = runtime.load_block_type(block_type)
json_data = {
'block_type': block_type,
'fields': {},
}
if definition_id is not None:
json_data['definition'] = definition_id
if parent_xblock is None:
# If no parent, then nothing to inherit.
inherited_settings = {}
else:
inherited_settings = parent_xblock.xblock_kvs.inherited_settings.copy()
if fields is not None:
for field_name in inheritance.InheritanceMixin.fields:
if field_name in fields:
inherited_settings[field_name] = fields[field_name]
new_block = runtime.xblock_from_json(
xblock_class,
course_key,
BlockKey(block_type, block_id) if block_id else None,
BlockData(**json_data),
**kwargs
)
for field_name, value in six.iteritems((fields or {})):
setattr(new_block, field_name, value)
if parent_xblock is not None:
parent_xblock.children.append(new_block.scope_ids.usage_id)
# decache pending children field settings
parent_xblock.save()
return new_block
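    # Example call (sketch; the runtime, keys and field values are illustrative):
    #   block = store.create_xblock(
    #       runtime, course_key, 'html',
    #       fields={'display_name': 'Intro'},
    #       parent_xblock=parent,
    #   )
    #   # `block` is not persisted until e.g. persist_xblock_dag() is called.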
def persist_xblock_dag(self, xblock, user_id, force=False):
"""
create or update the xblock and all of its children. The xblock's location must specify a course.
        If it doesn't specify a usage_id, then it's presumed to be new and needs creation. This function
descends the children performing the same operation for any that are xblocks. Any children which
are block_ids just update the children pointer.
All updates go into the same course version (bulk updater).
Updates the objects which came in w/ updated location and definition_location info.
returns the post-persisted version of the incoming xblock. Note that its children will be ids not
objects.
:param xblock: the head of the dag
:param user_id: who's doing the change
"""
# find course_index entry if applicable and structures entry
course_key = xblock.location.course_key
with self.bulk_operations(course_key):
index_entry = self._get_index_if_valid(course_key, force)
structure = self._lookup_course(course_key).structure
new_structure = self.version_structure(course_key, structure, user_id)
new_id = new_structure['_id']
is_updated = self._persist_subdag(course_key, xblock, user_id, new_structure['blocks'], new_id)
if is_updated:
self.update_structure(course_key, new_structure)
# update the index entry if appropriate
if index_entry is not None:
self._update_head(course_key, index_entry, xblock.location.branch, new_id)
# fetch and return the new item--fetching is unnecessary but a good qc step
return self.get_item(xblock.location.for_version(new_id))
else:
return xblock
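    # Example call (sketch; names are illustrative):
    #   persisted = store.persist_xblock_dag(new_chapter_xblock, user_id)
    #   persisted.children  # -> usage ids of the children, not XBlock objects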
def _persist_subdag(self, course_key, xblock, user_id, structure_blocks, new_id):
# persist the definition if persisted != passed
partitioned_fields = self.partition_xblock_fields_by_scope(xblock)
new_def_data = self._serialize_fields(xblock.category, partitioned_fields[Scope.content])
is_updated = False
if xblock.definition_locator is None or isinstance(xblock.definition_locator.definition_id, LocalId):
xblock.definition_locator = self.create_definition_from_data(
course_key, new_def_data, xblock.category, user_id
)
is_updated = True
elif new_def_data:
xblock.definition_locator, is_updated = self.update_definition_from_data(
course_key, xblock.definition_locator, new_def_data, user_id
)
if isinstance(xblock.scope_ids.usage_id.block_id, LocalId):
# generate an id
is_new = True
is_updated = True
block_id = getattr(xblock.scope_ids.usage_id.block_id, 'block_id', None)
if block_id is None:
block_key = self._generate_block_key(structure_blocks, xblock.scope_ids.block_type)
else:
block_key = BlockKey(xblock.scope_ids.block_type, block_id)
new_usage_id = xblock.scope_ids.usage_id.replace(block_id=block_key.id)
xblock.scope_ids = xblock.scope_ids._replace(usage_id=new_usage_id)
else:
is_new = False
block_key = BlockKey(xblock.scope_ids.block_type, xblock.scope_ids.usage_id.block_id)
children = []
if xblock.has_children:
for child in xblock.children:
if isinstance(child.block_id, LocalId):
child_block = xblock.system.get_block(child)
is_updated = self._persist_subdag(course_key, child_block, user_id, structure_blocks, new_id) or is_updated
children.append(BlockKey.from_usage_key(child_block.location))
else:
children.append(BlockKey.from_usage_key(child))
is_updated = is_updated or structure_blocks[block_key].fields['children'] != children
block_fields = partitioned_fields[Scope.settings]
block_fields = self._serialize_fields(xblock.category, block_fields)
if not is_new and not is_updated:
is_updated = self._compare_settings(block_fields, structure_blocks[block_key].fields)
if children:
block_fields['children'] = children
if is_updated:
if is_new:
block_info = self._new_block(
user_id,
xblock.category,
block_fields,
xblock.definition_locator.definition_id,
new_id,
raw=True
)
else:
block_info = structure_blocks[block_key]
block_info.fields = block_fields
block_info.definition = xblock.definition_locator.definition_id
self.version_block(block_info, user_id, new_id)
structure_blocks[block_key] = block_info
return is_updated
def _compare_settings(self, settings, original_fields):
"""
Return True if the settings are not == to the original fields
:param settings:
:param original_fields:
"""
original_keys = list(original_fields.keys())
if 'children' in original_keys:
original_keys.remove('children')
if len(settings) != len(original_keys):
return True
else:
new_keys = list(settings.keys())
for key in original_keys:
if key not in new_keys or original_fields[key] != settings[key]:
return True
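    # Example: _compare_settings({'a': 1}, {'a': 1, 'children': [...]}) falls
    # through (returning None, which is falsy) because 'children' is ignored and
    # the remaining fields match; any added, removed, or changed key returns True.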
def copy(self, user_id, source_course, destination_course, subtree_list=None, blacklist=None):
"""
Copies each xblock in subtree_list and those blocks descendants excluding blacklist
from source_course to destination_course.
To delete a block in the destination_course, copy its parent and blacklist the other
        siblings to keep them from being copied. You can also just call delete_item on the destination.
Ensures that each subtree occurs in the same place in destination as it does in source. If any
of the source's subtree parents are missing from destination, it raises ItemNotFound([parent_ids]).
To determine the same relative order vis-a-vis published siblings,
publishing may involve changing the order of previously published siblings. For example,
if publishing `[c, d]` and source parent has children `[a, b, c, d, e]` and destination parent
currently has children `[e, b]`, there's no obviously correct resulting order; thus, publish will
reorder destination to `[b, c, d, e]` to make it conform with the source.
:param source_course: a CourseLocator (can be a version or course w/ branch)
:param destination_course: a CourseLocator which must be an existing course but branch doesn't have
to exist yet. (The course must exist b/c Locator doesn't have everything necessary to create it).
Note, if the branch doesn't exist, then the source_course structure's root must be in subtree_list;
otherwise, the publish will violate the parents must exist rule.
:param subtree_list: a list of usage keys whose subtrees to publish.
:param blacklist: a list of usage keys to not change in the destination: i.e., don't add
if not there, don't update if there.
Raises:
            ItemNotFoundError: if it cannot find the course, or if the request is to publish a
            subtree but the ancestors up to and including the course root are not published.
"""
# get the destination's index, and source and destination structures.
with self.bulk_operations(source_course):
source_structure = self._lookup_course(source_course).structure
with self.bulk_operations(destination_course):
index_entry = self.get_course_index(destination_course)
if index_entry is None:
# brand new course
raise ItemNotFoundError(destination_course)
if destination_course.branch not in index_entry['versions']:
# must be copying the dag root if there's no current dag
root_block_key = source_structure['root']
if not any(root_block_key == BlockKey.from_usage_key(subtree) for subtree in subtree_list):
raise ItemNotFoundError(u'Must publish course root {}'.format(root_block_key))
root_source = source_structure['blocks'][root_block_key]
# create branch
destination_structure = self._new_structure(
user_id, root_block_key,
# leave off the fields b/c the children must be filtered
definition_id=root_source.definition,
)
else:
destination_structure = self._lookup_course(destination_course).structure
destination_structure = self.version_structure(destination_course, destination_structure, user_id)
if blacklist != EXCLUDE_ALL:
blacklist = [BlockKey.from_usage_key(shunned) for shunned in blacklist or []]
# iterate over subtree list filtering out blacklist.
orphans = set()
destination_blocks = destination_structure['blocks']
for subtree_root in subtree_list:
if BlockKey.from_usage_key(subtree_root) != source_structure['root']:
# find the parents and put root in the right sequence
parents = self._get_parents_from_structure(BlockKey.from_usage_key(subtree_root), source_structure)
parent_found = False
for parent in parents:
# If a parent isn't found in the destination_blocks, it's possible it was renamed
# in the course export. Continue and only throw an exception if *no* parents are found.
if parent in destination_blocks:
parent_found = True
orphans.update(
self._sync_children(
source_structure['blocks'][parent],
destination_blocks[parent],
BlockKey.from_usage_key(subtree_root)
)
)
if len(parents) and not parent_found:
raise ItemNotFoundError(parents)
# update/create the subtree and its children in destination (skipping blacklist)
orphans.update(
self._copy_subdag(
user_id, destination_structure['_id'],
BlockKey.from_usage_key(subtree_root),
source_structure['blocks'],
destination_blocks,
blacklist
)
)
# remove any remaining orphans
for orphan in orphans:
# orphans will include moved as well as deleted xblocks. Only delete the deleted ones.
self._delete_if_true_orphan(orphan, destination_structure)
# update the db
self.update_structure(destination_course, destination_structure)
self._update_head(destination_course, index_entry, destination_course.branch, destination_structure['_id'])
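    # Example call (sketch; the course keys and usage keys are illustrative):
    #   store.copy(
    #       user_id,
    #       source_course=draft_course_key,
    #       destination_course=published_course_key,
    #       subtree_list=[chapter_usage_key],
    #       blacklist=[hidden_sequential_usage_key],
    #   )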
@contract(source_keys="list(BlockUsageLocator)", dest_usage=BlockUsageLocator)
def copy_from_template(self, source_keys, dest_usage, user_id, head_validation=True):
"""
Flexible mechanism for inheriting content from an external course/library/etc.
        Will copy all of the XBlocks whose keys are passed as `source_keys` so that they become
children of the XBlock whose key is `dest_usage`. Any previously existing children of
`dest_usage` that haven't been replaced/updated by this copy_from_template operation will
be deleted.
Unlike `copy()`, this does not care whether the resulting blocks are positioned similarly
in their new course/library. However, the resulting blocks will be in the same relative
order as `source_keys`.
If any of the blocks specified already exist as children of the destination block, they
will be updated rather than duplicated or replaced. If they have Scope.settings field values
overriding inherited default values, those overrides will be preserved.
IMPORTANT: This method does not preserve block_id - in other words, every block that is
copied will be assigned a new block_id. This is because we assume that the same source block
may be copied into one course in multiple places. However, it *is* guaranteed that every
time this method is called for the same source block and dest_usage, the same resulting
block id will be generated.
:param source_keys: a list of BlockUsageLocators. Order is preserved.
:param dest_usage: The BlockUsageLocator that will become the parent of an inherited copy
of all the xblocks passed in `source_keys`.
:param user_id: The user who will get credit for making this change.
"""
# Preload the block structures for all source courses/libraries/etc.
# so that we can access descendant information quickly
source_structures = {}
for key in source_keys:
course_key = key.course_key
if course_key.branch is None:
raise ItemNotFoundError("branch is required for all source keys when using copy_from_template")
if course_key not in source_structures:
with self.bulk_operations(course_key):
source_structures[course_key] = self._lookup_course(
course_key, head_validation=head_validation
).structure
destination_course = dest_usage.course_key
with self.bulk_operations(destination_course):
index_entry = self.get_course_index(destination_course)
if index_entry is None:
raise ItemNotFoundError(destination_course)
dest_structure = self._lookup_course(destination_course).structure
old_dest_structure_version = dest_structure['_id']
dest_structure = self.version_structure(destination_course, dest_structure, user_id)
# Set of all descendent block IDs of dest_usage that are to be replaced:
block_key = BlockKey(dest_usage.block_type, dest_usage.block_id)
orig_descendants = set(self.descendants(dest_structure['blocks'], block_key, depth=None, descendent_map={}))
# The descendants() method used above adds the block itself, which we don't consider a descendant.
orig_descendants.remove(block_key)
new_descendants = self._copy_from_template(
source_structures, source_keys, dest_structure, block_key, user_id, head_validation
)
            # Update the edit_info of the destination block:
            dest_info = dest_structure['blocks'][block_key]
dest_info.edit_info.previous_version = dest_info.edit_info.update_version
dest_info.edit_info.update_version = old_dest_structure_version
dest_info.edit_info.edited_by = user_id
dest_info.edit_info.edited_on = datetime.datetime.now(UTC)
orphans = orig_descendants - new_descendants
for orphan in orphans:
del dest_structure['blocks'][orphan]
self.update_structure(destination_course, dest_structure)
self._update_head(destination_course, index_entry, destination_course.branch, dest_structure['_id'])
# Return usage locators for all the new children:
return [
destination_course.make_usage_key(*k)
for k in dest_structure['blocks'][block_key].fields['children']
]
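    # Example call (sketch; the keys are illustrative):
    #   new_children = store.copy_from_template(
    #       [library_block_key_1, library_block_key_2],
    #       dest_usage=library_content_key,
    #       user_id=user_id,
    #   )
    #   # new_children come back in the same order as the source keys.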
def _copy_from_template(
self, source_structures, source_keys, dest_structure, new_parent_block_key, user_id, head_validation
):
"""
Internal recursive implementation of copy_from_template()
Returns the new set of BlockKeys that are the new descendants of the block with key 'block_key'
"""
new_blocks = set()
new_children = list() # ordered list of the new children of new_parent_block_key
for usage_key in source_keys:
src_course_key = usage_key.course_key
hashable_source_id = src_course_key.for_version(None)
block_key = BlockKey(usage_key.block_type, usage_key.block_id)
source_structure = source_structures[src_course_key]
if block_key not in source_structure['blocks']:
raise ItemNotFoundError(usage_key)
source_block_info = source_structure['blocks'][block_key]
# Compute a new block ID. This new block ID must be consistent when this
# method is called with the same (source_key, dest_structure) pair
unique_data = "{}:{}:{}".format(
six.text_type(hashable_source_id).encode("utf-8"),
block_key.id,
new_parent_block_key.id,
)
new_block_id = hashlib.sha1(unique_data.encode('utf-8')).hexdigest()[:20]
new_block_key = BlockKey(block_key.type, new_block_id)
# Now clone block_key to new_block_key:
new_block_info = copy.deepcopy(source_block_info)
# Note that new_block_info now points to the same definition ID entry as source_block_info did
existing_block_info = dest_structure['blocks'].get(new_block_key, BlockData())
# Inherit the Scope.settings values from 'fields' to 'defaults'
new_block_info.defaults = new_block_info.fields
# <workaround>
# CAPA modules store their 'markdown' value (an alternate representation of their content)
# in Scope.settings rather than Scope.content :-/
# markdown is a field that really should not be overridable - it fundamentally changes the content.
# capa modules also use a custom editor that always saves their markdown field to the metadata,
# even if it hasn't changed, which breaks our override system.
# So until capa modules are fixed, we special-case them and remove their markdown fields,
# forcing the inherited version to use XML only.
if usage_key.block_type == 'problem' and 'markdown' in new_block_info.defaults:
del new_block_info.defaults['markdown']
# </workaround>
# Preserve any existing overrides
new_block_info.fields = existing_block_info.fields
if 'children' in new_block_info.defaults:
del new_block_info.defaults['children'] # Will be set later
new_block_info.edit_info = existing_block_info.edit_info
new_block_info.edit_info.previous_version = new_block_info.edit_info.update_version
new_block_info.edit_info.update_version = dest_structure['_id']
# Note we do not set 'source_version' - it's only used for copying identical blocks
# from draft to published as part of publishing workflow.
# Setting it to the source_block_info structure version here breaks split_draft's has_changes() method.
new_block_info.edit_info.edited_by = user_id
new_block_info.edit_info.edited_on = datetime.datetime.now(UTC)
new_block_info.edit_info.original_usage = six.text_type(usage_key.replace(branch=None, version_guid=None))
new_block_info.edit_info.original_usage_version = source_block_info.edit_info.update_version
dest_structure['blocks'][new_block_key] = new_block_info
children = source_block_info.fields.get('children')
if children:
children = [src_course_key.make_usage_key(child.type, child.id) for child in children]
new_blocks |= self._copy_from_template(
source_structures, children, dest_structure, new_block_key, user_id, head_validation
)
new_blocks.add(new_block_key)
# And add new_block_key to the list of new_parent_block_key's new children:
new_children.append(new_block_key)
# Update the children of new_parent_block_key
dest_structure['blocks'][new_parent_block_key].fields['children'] = new_children
return new_blocks
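    # Note on the id scheme above: the new block id is
    # sha1("<source course>:<source block id>:<new parent block id>") truncated to
    # 20 hex characters, so calling copy_from_template again with the same source
    # and destination regenerates the same ids instead of duplicating blocks.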
def delete_item(self, usage_locator, user_id, force=False):
"""
        Delete the block, the tree rooted at it, and any references w/in the course to the block
from a new version of the course structure.
returns CourseLocator for new version
raises ItemNotFoundError if the location does not exist.
raises ValueError if usage_locator points to the structure root
        Creates a new course version. If the descriptor's location has an org, a course, and a run, it moves the course head
pointer. If the version_guid of the descriptor points to a non-head version and there's been an intervening
change to this item, it raises a VersionConflictError unless force is True. In the force case, it forks
the course but leaves the head pointer where it is (this change will not be in the course head).
"""
if not isinstance(usage_locator, BlockUsageLocator) or usage_locator.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(usage_locator)
with self.bulk_operations(usage_locator.course_key):
original_structure = self._lookup_course(usage_locator.course_key).structure
block_key = BlockKey.from_usage_key(usage_locator)
if original_structure['root'] == block_key:
raise ValueError("Cannot delete the root of a course")
if block_key not in original_structure['blocks']:
raise ValueError("Cannot delete block_key {} from course {}, because that block does not exist.".format(
block_key,
usage_locator,
))
index_entry = self._get_index_if_valid(usage_locator.course_key, force)
new_structure = self.version_structure(usage_locator.course_key, original_structure, user_id)
new_blocks = new_structure['blocks']
new_id = new_structure['_id']
parent_block_keys = self._get_parents_from_structure(block_key, original_structure)
for parent_block_key in parent_block_keys:
parent_block = new_blocks[parent_block_key]
parent_block.fields['children'].remove(block_key)
parent_block.edit_info.edited_on = datetime.datetime.now(UTC)
parent_block.edit_info.edited_by = user_id
parent_block.edit_info.previous_version = parent_block.edit_info.update_version
parent_block.edit_info.update_version = new_id
# remove the source_version reference
parent_block.edit_info.source_version = None
self.decache_block(usage_locator.course_key, new_id, parent_block_key)
self._remove_subtree(BlockKey.from_usage_key(usage_locator), new_blocks)
# update index if appropriate and structures
self.update_structure(usage_locator.course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(usage_locator.course_key, index_entry, usage_locator.branch, new_id)
result = usage_locator.course_key.for_version(new_id)
else:
result = CourseLocator(version_guid=new_id)
if isinstance(usage_locator.course_key, LibraryLocator):
self._flag_library_updated_event(usage_locator.course_key)
self._emit_item_deleted_signal(usage_locator, user_id)
return result
@contract(root_block_key=BlockKey, blocks='dict(BlockKey: BlockData)')
def _remove_subtree(self, root_block_key, blocks):
"""
Remove the subtree rooted at root_block_key
We do this breadth-first to make sure that we don't remove
any children that may have parents that we don't want to delete.
"""
# create mapping from each child's key to its parents' keys
child_parent_map = defaultdict(set)
for block_key, block_data in six.iteritems(blocks):
for child in block_data.fields.get('children', []):
child_parent_map[BlockKey(*child)].add(block_key)
to_delete = {root_block_key}
tier = {root_block_key}
while tier:
next_tier = set()
for block_key in tier:
for child in blocks[block_key].fields.get('children', []):
child_block_key = BlockKey(*child)
parents = child_parent_map[child_block_key]
# Make sure we want to delete all of the child's parents
# before slating it for deletion
if parents.issubset(to_delete):
next_tier.add(child_block_key)
tier = next_tier
to_delete.update(tier)
for block_key in to_delete:
del blocks[block_key]
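    # Illustrative walk: with children edges a -> [b, c], b -> [d], c -> [d],
    # calling _remove_subtree(b, blocks) deletes only b; d survives because its
    # other parent c is not slated for deletion.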
def delete_course(self, course_key, user_id):
"""
Remove the given course from the course index.
Only removes the course from the index. The data remains. You can use create_course
with a versions hash to restore the course; however, the edited_on and
edited_by won't reflect the originals, of course.
"""
# this is the only real delete in the system. should it do something else?
log.info(u"deleting course from split-mongo: %s", course_key)
self.delete_course_index(course_key)
# We do NOT call the super class here since we need to keep the assets
# in case the course is later restored.
# super(SplitMongoModuleStore, self).delete_course(course_key, user_id)
self._emit_course_deleted_signal(course_key)
@contract(block_map="dict(BlockKey: dict)", block_key=BlockKey)
def inherit_settings(
self, block_map, block_key, inherited_settings_map, inheriting_settings=None, inherited_from=None
):
"""
Updates block_data with any inheritable setting set by an ancestor and recurses to children.
"""
if block_key not in block_map:
return
block_data = block_map[block_key]
if inheriting_settings is None:
inheriting_settings = {}
if inherited_from is None:
inherited_from = []
# the currently passed down values take precedence over any previously cached ones
# NOTE: this should show the values which all fields would have if inherited: i.e.,
# not set to the locally defined value but to value set by nearest ancestor who sets it
inherited_settings_map.setdefault(block_key, {}).update(inheriting_settings)
# update the inheriting w/ what should pass to children
inheriting_settings = inherited_settings_map[block_key].copy()
block_fields = block_data.fields
for field_name in inheritance.InheritanceMixin.fields:
if field_name in block_fields:
inheriting_settings[field_name] = block_fields[field_name]
for child in block_fields.get('children', []):
try:
if child in inherited_from:
raise Exception(u'Infinite loop detected when inheriting to {}, having already inherited from {}'.format(child, inherited_from))
self.inherit_settings(
block_map,
BlockKey(*child),
inherited_settings_map,
inheriting_settings,
inherited_from + [child]
)
except KeyError:
# here's where we need logic for looking up in other structures when we allow cross pointers
# but it's also getting this during course creation if creating top down w/ children set or
# migration where the old mongo published had pointers to privates
pass
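    # Example (sketch): if a chapter block sets the inheritable `due` field and its
    # child sequential does not, then after inherit_settings() runs,
    # inherited_settings_map[<sequential key>]['due'] holds the chapter's value,
    # while the chapter's own entry only shows what *it* inherited from above.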
def descendants(self, block_map, block_id, depth, descendent_map):
"""
        Adds the block and its descendants, out to the given depth, to descendent_map.
Depth specifies the number of levels of descendants to return
(0 => this usage only, 1 => this usage and its children, etc...)
A depth of None returns all descendants
"""
if block_id not in block_map:
return descendent_map
if block_id not in descendent_map:
descendent_map[block_id] = block_map[block_id]
if depth is None or depth > 0:
depth = depth - 1 if depth is not None else None
for child in descendent_map[block_id].fields.get('children', []):
descendent_map = self.descendants(block_map, child, depth, descendent_map)
return descendent_map
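    # Example: descendants(blocks, BlockKey('chapter', 'week1'), depth=1, descendent_map={})
    # returns the chapter entry plus its immediate children; depth=None walks the
    # whole subtree. (The BlockKey shown is illustrative.)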
def get_modulestore_type(self, course_key=None):
"""
Returns an enumeration-like type reflecting the type of this modulestore, per ModuleStoreEnum.Type.
Args:
course_key: just for signature compatibility
"""
return ModuleStoreEnum.Type.split
def _find_course_assets(self, course_key):
"""
Split specific lookup
"""
try:
course_assets = self._lookup_course(course_key).structure.get('assets', {})
except (InsufficientSpecificationError, VersionConflictError) as err:
log.warning(u'Error finding assets for org "%s" course "%s" on asset '
u'request. Either version of course_key is None or invalid.',
course_key.org, course_key.course)
return {}
return course_assets
def _update_course_assets(self, user_id, asset_key, update_function):
"""
A wrapper for functions wanting to manipulate assets. Gets and versions the structure,
passes the mutable array for either 'assets' or 'thumbnails' as well as the idx to the function for it to
update, then persists the changed data back into the course.
The update function can raise an exception if it doesn't want to actually do the commit. The
surrounding method probably should catch that exception.
"""
with self.bulk_operations(asset_key.course_key):
original_structure = self._lookup_course(asset_key.course_key).structure
index_entry = self._get_index_if_valid(asset_key.course_key)
new_structure = self.version_structure(asset_key.course_key, original_structure, user_id)
course_assets = new_structure.setdefault('assets', {})
asset_type = asset_key.asset_type
all_assets = SortedAssetList(iterable=course_assets.setdefault(asset_type, []))
asset_idx = all_assets.find(asset_key)
all_assets_updated = update_function(all_assets, asset_idx)
new_structure['assets'][asset_type] = list(all_assets_updated)
# update index if appropriate and structures
self.update_structure(asset_key.course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(asset_key.course_key, index_entry, asset_key.branch, new_structure['_id'])
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
"""
Saves a list of AssetMetadata to the modulestore. The list can be composed of multiple
asset types. This method is optimized for multiple inserts at once - it only re-saves the structure
at the end of all saves/updates.
"""
# Determine course key to use in bulk operation. Use the first asset assuming that
# all assets will be for the same course.
asset_key = asset_metadata_list[0].asset_id
course_key = asset_key.course_key
with self.bulk_operations(course_key):
original_structure = self._lookup_course(course_key).structure
index_entry = self._get_index_if_valid(course_key)
new_structure = self.version_structure(course_key, original_structure, user_id)
course_assets = new_structure.setdefault('assets', {})
assets_by_type = self._save_assets_by_type(
course_key, asset_metadata_list, course_assets, user_id, import_only
)
for asset_type, assets in six.iteritems(assets_by_type):
new_structure['assets'][asset_type] = list(assets)
# update index if appropriate and structures
self.update_structure(course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(course_key, index_entry, asset_key.branch, new_structure['_id'])
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves or updates a single asset. Simply makes it a list and calls the list save above.
"""
return self.save_asset_metadata_list([asset_metadata, ], user_id, import_only)
@contract(asset_key='AssetKey', attr_dict=dict)
def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):
"""
Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr_dict (dict): attribute: value pairs to set
Raises:
ItemNotFoundError if no such item exists
            AttributeError if attr is one of the built-in attrs.
"""
def _internal_method(all_assets, asset_idx):
"""
Update the found item
"""
if asset_idx is None:
raise ItemNotFoundError(asset_key)
# Form an AssetMetadata.
mdata = AssetMetadata(asset_key, asset_key.path)
mdata.from_storable(all_assets[asset_idx])
mdata.update(attr_dict)
# Generate a Mongo doc from the metadata and update the course asset info.
all_assets.insert_or_update(mdata)
return all_assets
self._update_course_assets(user_id, asset_key, _internal_method)
@contract(asset_key='AssetKey')
def delete_asset_metadata(self, asset_key, user_id):
"""
Internal; deletes a single asset's metadata.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
def _internal_method(all_asset_info, asset_idx):
"""
Remove the item if it was found
"""
if asset_idx is None:
raise ItemNotFoundError(asset_key)
all_asset_info.pop(asset_idx)
return all_asset_info
try:
self._update_course_assets(user_id, asset_key, _internal_method)
return 1
except ItemNotFoundError:
return 0
@contract(source_course_key='CourseKey', dest_course_key='CourseKey')
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
"""
source_structure = self._lookup_course(source_course_key).structure
with self.bulk_operations(dest_course_key):
original_structure = self._lookup_course(dest_course_key).structure
index_entry = self._get_index_if_valid(dest_course_key)
new_structure = self.version_structure(dest_course_key, original_structure, user_id)
new_structure['assets'] = source_structure.get('assets', {})
new_structure['thumbnails'] = source_structure.get('thumbnails', [])
# update index if appropriate and structures
self.update_structure(dest_course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(dest_course_key, index_entry, dest_course_key.branch, new_structure['_id'])
def fix_not_found(self, course_locator, user_id):
"""
Only intended for rather low level methods to use. Goes through the children attrs of
each block removing any whose block_id is not a member of the course.
:param course_locator: the course to clean
"""
original_structure = self._lookup_course(course_locator).structure
index_entry = self._get_index_if_valid(course_locator)
new_structure = self.version_structure(course_locator, original_structure, user_id)
for block in six.itervalues(new_structure['blocks']):
if 'children' in block.fields:
block.fields['children'] = [
block_id for block_id in block.fields['children']
if block_id in new_structure['blocks']
]
self.update_structure(course_locator, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(course_locator, index_entry, course_locator.branch, new_structure['_id'])
def convert_references_to_keys(self, course_key, xblock_class, jsonfields, blocks):
"""
Convert the given serialized fields to the deserialized values by finding all references
and converting them.
:param jsonfields: the serialized copy of the xblock's fields
"""
@contract(block_key="BlockUsageLocator | seq[2]")
def robust_usage_key(block_key):
"""
create a course_key relative usage key for the block_key. If the block_key is in blocks,
use its correct category; otherwise, use 'unknown'.
The purpose for this is that some operations add pointers as they build up the
structure without worrying about order of creation. Because the category of the
usage_key is for the most part inert, it's better to hack a value than to work
out a dependency graph algorithm for those functions which may prereference blocks.
"""
# if this was taken from cache, then its fields are already converted
if isinstance(block_key, BlockUsageLocator):
return block_key.map_into_course(course_key)
elif not isinstance(block_key, BlockKey):
block_key = BlockKey(*block_key)
try:
return course_key.make_usage_key(
block_key.type, block_key.id
)
except KeyError:
return course_key.make_usage_key('unknown', block_key.id)
xblock_class = self.mixologist.mix(xblock_class)
# Make a shallow copy, so that we aren't manipulating a cached field dictionary
output_fields = dict(jsonfields)
for field_name, value in six.iteritems(output_fields):
if value:
try:
field = xblock_class.fields.get(field_name)
except AttributeError:
continue
if isinstance(field, Reference):
output_fields[field_name] = robust_usage_key(value)
elif isinstance(field, ReferenceList):
output_fields[field_name] = [robust_usage_key(ele) for ele in value]
elif isinstance(field, ReferenceValueDict):
for key, subvalue in six.iteritems(value):
value[key] = robust_usage_key(subvalue)
return output_fields
def _get_index_if_valid(self, course_key, force=False):
"""
If the course_key identifies a course and points to its draft (or plausibly its draft),
then return the index entry.
raises VersionConflictError if not the right version
:param course_key: a CourseLocator
:param force: if false, raises VersionConflictError if the current head of the course != the one identified
by course_key
"""
if course_key.org is None or course_key.course is None or course_key.run is None or course_key.branch is None:
return None
else:
index_entry = self.get_course_index(course_key)
is_head = (
course_key.version_guid is None or
index_entry['versions'][course_key.branch] == course_key.version_guid
)
if is_head or force:
return index_entry
else:
raise VersionConflictError(
course_key,
index_entry['versions'][course_key.branch]
)
    def _find_local_root(self, element_to_find, possibility, tree):
        """
        Return True if element_to_find appears in the subtree of `tree` rooted at `possibility`.
        """
        if possibility not in tree:
            return False
if element_to_find in tree[possibility]:
return True
for subtree in tree[possibility]:
if self._find_local_root(element_to_find, subtree, tree):
return True
return False
def _update_search_targets(self, index_entry, fields):
"""
Update the index entry if any of the given fields are in SEARCH_TARGET_DICT. (doesn't save
the changes, just changes them in the entry dict)
:param index_entry:
:param fields: a dictionary of fields and values usually only those explicitly set and already
ready for persisting (e.g., references converted to block_ids)
"""
for field_name, field_value in six.iteritems(fields):
if field_name in self.SEARCH_TARGET_DICT:
index_entry.setdefault('search_targets', {})[field_name] = field_value
def _update_head(self, course_key, index_entry, branch, new_id):
"""
Update the active index for the given course's branch to point to new_id
        :param course_key:
        :param index_entry:
        :param branch:
        :param new_id:
"""
if not isinstance(new_id, ObjectId):
raise TypeError('new_id must be an ObjectId, but is {!r}'.format(new_id))
index_entry['versions'][branch] = new_id
self.update_course_index(course_key, index_entry)
def partition_xblock_fields_by_scope(self, xblock):
"""
Return a dictionary of scopes mapped to this xblock's explicitly set fields w/o any conversions
"""
# explicitly_set_fields_by_scope converts to json; so, avoiding it
# the existing partition_fields_by_scope works on a dict not an xblock
result = defaultdict(dict)
for field in six.itervalues(xblock.fields):
if field.is_set_on(xblock):
result[field.scope][field.name] = field.read_from(xblock)
return result
def _serialize_fields(self, category, fields):
"""
Convert any references to their serialized form. Handle some references already being unicoded
because the client passed them that way and nothing above this layer did the necessary deserialization.
Remove any fields which split or its kvs computes or adds but does not want persisted.
:param fields: a dict of fields
"""
assert isinstance(fields, dict)
xblock_class = XBlock.load_class(category, self.default_class)
xblock_class = self.mixologist.mix(xblock_class)
def reference_block_id(reference):
"""
Handle client possibly setting field to strings rather than keys to get the block_id
"""
# perhaps replace by fixing the views or Field Reference*.from_json to return a Key
if isinstance(reference, six.string_types):
reference = BlockUsageLocator.from_string(reference)
elif isinstance(reference, BlockKey):
return reference
return BlockKey.from_usage_key(reference)
for field_name, value in six.iteritems(fields):
if value is not None:
if isinstance(xblock_class.fields[field_name], Reference):
fields[field_name] = reference_block_id(value)
elif isinstance(xblock_class.fields[field_name], ReferenceList):
fields[field_name] = [
reference_block_id(ele) for ele in value
]
elif isinstance(xblock_class.fields[field_name], ReferenceValueDict):
for key, subvalue in six.iteritems(value):
value[key] = reference_block_id(subvalue)
# should this recurse down dicts and lists just in case they contain datetime?
elif not isinstance(value, datetime.datetime): # don't convert datetimes!
fields[field_name] = xblock_class.fields[field_name].to_json(value)
return fields
def _new_structure(self, user_id, root_block_key, block_fields=None, definition_id=None):
"""
Internal function: create a structure element with no previous version. Must provide the root id
but not necessarily the info needed to create it (for the use case of publishing). If providing
        root_block_key, must also provide block_fields and definition_id
"""
new_id = ObjectId()
if root_block_key is not None:
if block_fields is None:
block_fields = {}
blocks = {
root_block_key: self._new_block(
user_id, root_block_key.type, block_fields, definition_id, new_id
)
}
else:
blocks = {}
return {
'_id': new_id,
'root': root_block_key,
'previous_version': None,
'original_version': new_id,
'edited_by': user_id,
'edited_on': datetime.datetime.now(UTC),
'blocks': blocks,
'schema_version': self.SCHEMA_VERSION,
}
@contract(block_key=BlockKey)
def _get_parents_from_structure(self, block_key, structure):
"""
        Given a structure, find block_key's parents in that structure. Note: returns
        the encoded format for the parents.
"""
return [
parent_block_key
for parent_block_key, value in six.iteritems(structure['blocks'])
if block_key in value.fields.get('children', [])
]
def _sync_children(self, source_parent, destination_parent, new_child):
"""
Reorder destination's children to the same as source's and remove any no longer in source.
Return the removed ones as orphans (a set).
"""
destination_reordered = []
destination_children = set(destination_parent.fields['children'])
source_children = source_parent.fields['children']
orphans = destination_children - set(source_children)
for child in source_children:
if child == new_child or child in destination_children:
destination_reordered.append(child)
destination_parent.fields['children'] = destination_reordered
return orphans
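    # Example: with source children [a, b, c], destination children [c, x] and
    # new_child=b, the destination is reordered to [b, c] and {x} is returned as
    # orphans (a is skipped because it is neither new_child nor already present).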
@contract(
block_key=BlockKey,
source_blocks="dict(BlockKey: *)",
destination_blocks="dict(BlockKey: *)",
blacklist="list(BlockKey) | str",
)
def _copy_subdag(self, user_id, destination_version, block_key, source_blocks, destination_blocks, blacklist):
"""
Update destination_blocks for the sub-dag rooted at block_key to be like the one in
source_blocks excluding blacklist.
Return any newly discovered orphans (as a set)
"""
orphans = set()
destination_block = destination_blocks.get(block_key)
new_block = source_blocks[block_key]
if destination_block:
# reorder children to correspond to whatever order holds for source.
# remove any which source no longer claims (put into orphans)
# add any which are being copied
source_children = new_block.fields.get('children', [])
existing_children = destination_block.fields.get('children', [])
destination_reordered = SparseList()
for child in existing_children:
try:
index = source_children.index(child)
destination_reordered[index] = child
except ValueError:
orphans.add(BlockKey(*child))
if blacklist != EXCLUDE_ALL:
for index, child in enumerate(source_children):
if child not in blacklist:
destination_reordered[index] = child
            # the history of the published branch leaps between publications and only points to
            # previously published versions.
previous_version = destination_block.edit_info.update_version
destination_block = copy.deepcopy(new_block)
destination_block.fields['children'] = destination_reordered.compact_list()
destination_block.edit_info.previous_version = previous_version
destination_block.edit_info.update_version = destination_version
destination_block.edit_info.edited_by = user_id
destination_block.edit_info.edited_on = datetime.datetime.now(UTC)
else:
destination_block = self._new_block(
user_id, new_block.block_type,
self._filter_blacklist(copy.copy(new_block.fields), blacklist),
new_block.definition,
destination_version,
raw=True,
asides=new_block.asides,
block_defaults=new_block.defaults
)
# Extend the block's new edit_info with any extra edit_info fields from the source (e.g. original_usage):
for key, val in six.iteritems(new_block.edit_info.to_storable()):
if getattr(destination_block.edit_info, key) is None:
setattr(destination_block.edit_info, key, val)
# If the block we are copying from was itself a copy, then just
# reference the original source, rather than the copy.
destination_block.edit_info.source_version = (
new_block.edit_info.source_version or new_block.edit_info.update_version
)
if blacklist != EXCLUDE_ALL:
for child in destination_block.fields.get('children', []):
if child not in blacklist:
orphans.update(
self._copy_subdag(
user_id, destination_version, BlockKey(*child), source_blocks, destination_blocks, blacklist
)
)
destination_blocks[block_key] = destination_block
return orphans
@contract(blacklist='list(BlockKey) | str')
def _filter_blacklist(self, fields, blacklist):
"""
Filter out blacklist from the children field in fields. Will construct a new list for children;
        so, no need to worry about copying the children field, but it will modify fields.
"""
if blacklist == EXCLUDE_ALL:
fields['children'] = []
else:
fields['children'] = [child for child in fields.get('children', []) if BlockKey(*child) not in blacklist]
return fields
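    # Example (a, b, c stand for block-key tuples): _filter_blacklist(
    #     {'children': [a, b, c]}, [BlockKey(*b)]) leaves {'children': [a, c]};
    # passing EXCLUDE_ALL empties the children list instead.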
@contract(orphan=BlockKey)
def _delete_if_true_orphan(self, orphan, structure):
"""
Delete the orphan and any of its descendants which no longer have parents.
"""
if len(self._get_parents_from_structure(orphan, structure)) == 0:
orphan_data = structure['blocks'].pop(orphan)
for child in orphan_data.fields.get('children', []):
self._delete_if_true_orphan(BlockKey(*child), structure)
@contract(returns=BlockData)
def _new_block(self, user_id, category, block_fields, definition_id, new_id, raw=False,
asides=None, block_defaults=None):
"""
Create the core document structure for a block.
:param block_fields: the settings and children scoped fields as a dict or son
:param definition_id: the pointer to the content scoped fields
:param new_id: the structure's version id
:param raw: true if this block already has all references serialized
:param asides: dict information related to the connected xblock asides
"""
if not raw:
block_fields = self._serialize_fields(category, block_fields)
if not asides:
asides = {}
document = {
'block_type': category,
'definition': definition_id,
'fields': block_fields,
'asides': asides,
'edit_info': {
'edited_on': datetime.datetime.now(UTC),
'edited_by': user_id,
'previous_version': None,
'update_version': new_id
}
}
if block_defaults:
document['defaults'] = block_defaults
return BlockData(**document)
@contract(block_key=BlockKey, returns='BlockData | None')
def _get_block_from_structure(self, structure, block_key):
"""
Encodes the block key before retrieving it from the structure to ensure it can
be a json dict key.
"""
return structure['blocks'].get(block_key)
@contract(block_key=BlockKey)
def _get_asides_to_update_from_structure(self, structure, block_key, asides):
"""
Get list of aside fields that should be updated/inserted
"""
block = self._get_block_from_structure(structure, block_key)
if asides:
updated = False
tmp_new_asides_data = {}
for asd in asides:
aside_type = asd['aside_type']
tmp_new_asides_data[aside_type] = asd
result_list = []
for i, aside in enumerate(block.asides):
if aside['aside_type'] in tmp_new_asides_data:
result_list.append(tmp_new_asides_data.pop(aside['aside_type']))
updated = True
else:
result_list.append(aside)
if tmp_new_asides_data:
for _, asd in six.iteritems(tmp_new_asides_data):
result_list.append(asd)
updated = True
return result_list, updated
else:
return block.asides, False
@contract(block_key=BlockKey, content=BlockData)
def _update_block_in_structure(self, structure, block_key, content):
"""
Encodes the block key before accessing it in the structure to ensure it can
be a json dict key.
"""
structure['blocks'][block_key] = content
@autoretry_read()
def find_courses_by_search_target(self, field_name, field_value):
"""
Find all the courses which cached that they have the given field with the given value.
Returns: list of branch-agnostic course_keys
"""
entries = self.find_matching_course_indexes(
search_targets={field_name: field_value}
)
return [
CourseLocator(entry['org'], entry['course'], entry['run']) # Branch agnostic
for entry in entries
]
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
return self.find_courses_by_search_target('wiki_slug', wiki_slug)
def heartbeat(self):
"""
Check that the db is reachable.
"""
return {ModuleStoreEnum.Type.split: self.db_connection.heartbeat()}
def create_runtime(self, course_entry, lazy):
"""
Create the proper runtime for this course
"""
services = self.services
services["partitions"] = PartitionService(course_entry.course_key)
return CachingDescriptorSystem(
modulestore=self,
course_entry=course_entry,
module_data={},
lazy=lazy,
default_class=self.default_class,
error_tracker=self.error_tracker,
render_template=self.render_template,
mixins=self.xblock_mixins,
select=self.xblock_select,
disabled_xblock_types=self.disabled_xblock_types,
services=services,
)
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
self.db_connection.ensure_indexes()
class SparseList(list):
"""
Enable inserting items into a list in arbitrary order and then retrieving them.
"""
# taken from http://stackoverflow.com/questions/1857780/sparse-assignment-list-in-python
def __setitem__(self, index, value):
"""
Add value to the list ensuring the list is long enough to accommodate it at the given index
"""
missing = index - len(self) + 1
if missing > 0:
self.extend([None] * missing)
list.__setitem__(self, index, value)
def compact_list(self):
"""
        Return a regular list w/ all Nones removed
"""
return [ele for ele in self if ele is not None]
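# Example (sketch): SparseList lets _copy_subdag drop children into their source
# positions in any order and then squeeze out the gaps.
#   sl = SparseList()
#   sl[3] = 'd'
#   sl[1] = 'b'
#   sl.compact_list()  # -> ['b', 'd']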
|
# PMData.py
# Phenotype microarray module for parsing optical density data
#
# Author: Daniel A Cuevas
# Created on 12 Dec 2013
# Updated on 11 May 2017
from __future__ import absolute_import, division, print_function
import pandas as pd
class PMData:
"""Class for parsing phenotype microarray data"""
def __init__(self, filepath, plateFlag):
self.plateFlag = plateFlag
self.replicates = {} # Hash of clone->[reps]
self.clones = [] # Set of unique clone names
self.wells = []
# Primary data structure to access data
self.DF = pd.DataFrame()
self.__loadData(filepath)
self.__init()
def __loadData(self, filepath):
"""Load data into Pandas DataFrame"""
indices = ["sample", "rep", "well", "time"]
self.DF = pd.read_csv(filepath, delimiter="\t", index_col=indices,
dtype={"sample": str, "rep": str})
self.DF = self.DF.sort_index(level=[0, 1, 2, 3])
def __init(self):
"""Initialize all class variables"""
self.__sortWells()
def __sortWells(self):
"""Sort wells numerically rather than alphanumerically"""
#self.wells = [(x[0], int(x[1:]))
# for x in self.DF.index.levels[2]]
#self.wells = sorted(self.wells, key=itemgetter(0, 1))
#self.wells = ["{}{}".format(x[0], x[1]) for x in self.wells]
self.wells = self.DF.index.get_level_values("well").unique()
def __QACheck(self):
"""QA check to ensure stable data set"""
pass
def getSampleNames(self):
return self.DF.index.get_level_values("sample").unique()
def getNumSamples(self):
return len(self.getSampleNames())
def getReplicates(self, sample):
return self.DF.loc[sample].index.get_level_values("rep")
def getWells(self):
"""
Return well information
With plate info: return a DataFrame
Without plate info: return an Index array
"""
if self.plateFlag:
# Grab only the mainsource and compound columns
wells = self.DF[["mainsource", "compound"]]
            # Remove duplicate items by grouping by the well id and keeping the last entry
wells = wells.groupby(level="well", sort=False).last()
else:
wells = self.wells
return wells
def getNumWells(self):
return len(self.getWells())
def getODCurve(self, sample, well, rep):
"""Retrieve a single OD curve"""
return self.DF.loc[(sample, rep, well, slice(None))]["od"]
def getMedianCurves(self):
"""Return DataFrame median curves for each sample"""
df = self.DF.median(level=["sample", "well", "time"])
if self.plateFlag:
leftMerge = df.reset_index()
rightMerge = self.DF[["mainsource", "compound"]].reset_index(
level=[1, 3], drop=True).reset_index().drop_duplicates()
df = pd.merge(
leftMerge,
rightMerge,
on=["sample", "well"],
how="left"
).set_index(["sample", "well"])
return df
def getMeanCurves(self):
"""Return DataFrame of mean curves for each sample"""
df = self.DF.mean(level=["sample", "well", "time"])
if self.plateFlag:
leftMerge = df.reset_index()
rightMerge = self.DF[["mainsource", "compound"]].reset_index(
level=[1, 3], drop=True).reset_index().drop_duplicates()
df = pd.merge(
leftMerge,
rightMerge,
on=["sample", "well"],
how="left"
).set_index(["sample", "well"])
return df
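# Example usage (sketch; the file name, sample and well ids are illustrative;
# the input is a tab-delimited file indexed by sample, rep, well, time):
#   pm = PMData("od_readings.tsv", plateFlag=True)
#   pm.getSampleNames()                          # unique sample ids
#   curve = pm.getODCurve("cloneA", "A01", "1")  # single OD time series
#   medians = pm.getMedianCurves()               # median curve per sample/well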
|
import gtk, pango, sys, os
gtk.gdk.threads_init()
# PyGTK GUI implementation
class GUI:
def __init__(self):
self.checkboxes = []
self.comboboxes = []
self.textfields = []
def doInstaller(self, model):
self.model = model
# Create Window
self.window = create_window(model.title)
add_logo(self.window, model.logo)
# Create notebook
notebook = gtk.Notebook()
append_component(self.window.main_vbox, notebook)
# Create tabs
for tab in model.tabs:
box = gtk.VBox()
notebook.append_page(box, gtk.Label(tab.label))
self._do_tab(box, tab)
# Create buttons
but_box = gtk.HBox()
append_component(self.window.main_vbox, but_box)
for button in model.buttons:
b = gtk.Button(label=button.label)
b.connect("clicked", self.callback_button, None)
b.pmodel = button
but_box.pack_start(b)
# Show everything
self.window.show_all()
gtk.main()
# Configure one tab
def _do_tab(self, container, tab):
# Add text
if hasattr(tab, 'text'):
label = gtk.Label(tab.text)
label.set_justify(gtk.JUSTIFY_LEFT)
label.set_alignment(0, 0)
label.set_line_wrap(True)
label.set_size_request(-1, -1)
append_component(container, label)
# Add groups
for group in tab.groups:
frame = gtk.Frame(group.label)
append_component(container, frame)
self._do_group(frame, group)
# Configure one group
def _do_group(self, container, group):
box = gtk.VBox()
container.add(box)
# Define orientation for checkboxes
if group.orientation == 'horizontal':
cb_box = gtk.HBox()
box.pack_start(cb_box, False, False, 0)
space = 5
else:
cb_box = box
space = 0
# Add Checkboxes
for checkbox in group.checkboxes:
cb = create_checkbox(checkbox)
self.checkboxes.append(cb)
append_component(cb_box, cb, space)
# Add comboboxes
for combo in group.comboboxes:
label = gtk.Label(combo.label + ':')
gcombo = create_combo(combo)
self.comboboxes.append(gcombo)
co_box = gtk.HBox()
co_box.pack_start(label, False, False, 5)
co_box.pack_end(gcombo)
box.pack_start(co_box, False, False, 0)
# Add TextFields
for textfield in group.textfields:
label = gtk.Label(textfield.label + ':')
entry = gtk.Entry()
entry.set_text(textfield.value)
self.textfields.append(entry)
entry.pmodel = textfield
t_box = gtk.HBox()
t_box.pack_start(label, False, False, 3)
t_box.pack_end(entry)
box.pack_start(t_box, False, False, 0)
# Response functions
def callback_button(self, button, data=None):
self.update_model()
button.pmodel.callback()
# Iterate over components and update model
def update_model(self):
for c in self.checkboxes:
c.pmodel.checked = c.get_active()
for c in self.comboboxes:
c.pmodel.selected = _combo_get_selected(c)
for t in self.textfields:
t.pmodel.value = t.get_text()
## EXECUTOR
def doExecutor(self, e):
# Quit old window
self.window.hide()
# Create window
self.remove_next_line = False
self.window = create_window(e.title)
add_logo(self.window, e.logo)
self.window.set_resizable(True)
self.window.resize(376,450)
# Terminal window
sw = gtk.ScrolledWindow()
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.textview = gtk.TextView()
self.textview.set_editable(False)
self.textbuffer = self.textview.get_buffer()
sw.add(self.textview)
self.window.main_vbox.pack_start(sw, True, True, 0)
# Tags
self.normal_tag = self.textbuffer.create_tag(font="Monospace")
self.section_tag = self.textbuffer.create_tag(weight=pango.WEIGHT_BOLD)
self.ok_tag = self.textbuffer.create_tag(weight=pango.WEIGHT_BOLD, foreground='Blue')
self.error_tag = self.textbuffer.create_tag(weight=pango.WEIGHT_BOLD, foreground='Red')
# Create buttons
but_box = gtk.HBox()
append_component(self.window.main_vbox, but_box)
self.buttons = []
for button in e.buttons:
b = gtk.Button(label=button.label)
b.connect("clicked", self.callback_button, None)
b.pmodel = button
self.buttons.append(b)
but_box.pack_start(b)
# Show everything
self.window.show_all()
def write_command_line(self, line):
# Finished installation, change button
if line == "END":
self.buttons[0].set_label("Close")
return
# What tag to use
tag = self.normal_tag
if line[0:2] == '# ':
tag = self.section_tag
line = line[2:]
elif line == "[ OK ]\n":
tag = self.ok_tag
elif line[0:3] == '!! ':
tag = self.error_tag
line = line[2:]
# Safe GTK thread
gtk.gdk.threads_enter()
# Delete last line if it is \r
if self.remove_next_line and line != '\n':
it = self.textbuffer.get_iter_at_line(self.textbuffer.get_line_count()-2)
self.textbuffer.delete(it, self.textbuffer.get_end_iter())
self.remove_next_line = False
# Insert text and relocate scroll
self.textbuffer.insert_with_tags(self.textbuffer.get_end_iter(), line, tag)
self.textview.scroll_to_iter(self.textbuffer.get_end_iter(), 0)
# Safe GTK Thread
gtk.gdk.threads_leave()
        if line.endswith('\r'): self.remove_next_line = True
# Quit
def quit(self):
gtk.main_quit()
# Auxiliary function
def create_window(title):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.connect("destroy", lambda w: sys.exit(1))
window.set_position(gtk.WIN_POS_CENTER)
window.set_title(title)
window.set_border_width(0)
window.set_resizable(False)
mainBox = gtk.VBox()
window.add(mainBox)
window.main_vbox = mainBox
return window
def add_logo(window, logoFile):
logoImg = gtk.gdk.pixbuf_new_from_file(logoFile)
logo = gtk.Image()
#logo.set_from_pixbuf(logoImg.scale_simple(100,100,gtk.gdk.INTERP_BILINEAR))
logo.set_from_pixbuf(logoImg)
#logo.set_size_request(100, 100)
window.main_vbox.pack_start(logo, False, False, 5)
def create_checkbox(checkbox):
checkButton = gtk.CheckButton(checkbox.label)
checkButton.set_active(checkbox.checked)
checkButton.set_alignment(0, 0)
checkButton.pmodel = checkbox
return checkButton
def create_combo(combo):
gcombo = gtk.combo_box_new_text()
i = 0
for option in combo.options:
gcombo.append_text(option)
if option == combo.selected:
gcombo.set_active(i)
i = i+1
gcombo.pmodel = combo
return gcombo
def append_component(where, what, space=5):
where.pack_start(what, False, False, space)
def _combo_get_selected(combobox):
model = combobox.get_model()
active = combobox.get_active()
if active < 0:
return None
return model[active][0]
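# Illustrative sketch (not part of the original module): how the auxiliary
# helpers above can be wired together.  It assumes the same PyGTK environment
# the module already relies on; _Checkbox and _Combo are hypothetical
# stand-ins for the real pmodel objects.
def _helpers_example():
    class _Checkbox(object):
        label = 'Enable feature'
        checked = True
    class _Combo(object):
        options = ['small', 'medium', 'large']
        selected = 'medium'
    win = create_window('Demo')
    append_component(win.main_vbox, create_checkbox(_Checkbox()))
    gcombo = create_combo(_Combo())
    append_component(win.main_vbox, gcombo)
    win.show_all()
    # _combo_get_selected(gcombo) returns 'medium' at this point;
    # gtk.main() would start the event loop.
    return win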
|
import numpy as np
class Perceptron:
    ''' Perceptron classifier
Parameters
------------
eta : float
        Learning rate (between 0.0 and 1.0)
n_iter : int
Passes over the training dataset.
Attributes
-------------
    w_ : 1d-array
Weights after fitting.
errors_ : list
Number of misclassifications in every epoch
'''
def __init__(self, eta=0.01, n_iter=10):
self.eta = eta
self.n_iter = n_iter
def fit(self, X, y):
''' Fit training data.
Parameters
------------
X : array_like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
------------
self : object
'''
self.w_ = np.zeros(1 + X.shape[1])
self.errors_ = []
for _ in range(self.n_iter):
errors = 0
for xi, target in zip(X, y):
update = self.eta * (target - self.predict(xi))
self.w_[1:] += update * xi
self.w_[0] += update
errors += int(update != 0.0)
            self.errors_.append(errors)
        return self
def net_input(self, X):
''' Calculate net input '''
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
''' Return class label after unit step '''
return np.where(self.net_input(X) >= 0.0, 1, -1)
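# Minimal usage sketch (illustration only): train the perceptron on a tiny,
# linearly separable AND-style dataset; labels are assumed to be in {-1, 1}
# to match the unit step in predict().
if __name__ == '__main__':
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([-1, -1, -1, 1])
    ppn = Perceptron(eta=0.1, n_iter=10)
    ppn.fit(X, y)
    print(ppn.errors_)     # misclassifications per epoch, should reach 0
    print(ppn.predict(X))  # expected: [-1 -1 -1  1]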
|
# (c) 2016, Allen Sanabria <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os import path, walk
import re
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native, to_text
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def _mutually_exclusive(self):
dir_arguments = [
self.source_dir, self.files_matching, self.ignore_files,
self.depth
]
if self.source_file and None not in dir_arguments:
err_msg = (
"Can not include {0} with file argument"
.format(", ".join(self.VALID_DIR_ARGUMENTS))
)
raise AnsibleError(err_msg)
elif self.source_dir and self.source_file:
            err_msg = (
                "Pass either 'file' or 'dir', not both"
            )
raise AnsibleError(err_msg)
def _set_dir_defaults(self):
if not self.depth:
self.depth = 0
if self.files_matching:
self.matcher = re.compile(r'{0}'.format(self.files_matching))
else:
self.matcher = None
if not self.ignore_files:
self.ignore_files = list()
if isinstance(self.ignore_files, str):
self.ignore_files = self.ignore_files.split()
elif isinstance(self.ignore_files, dict):
return {
'failed': True,
'message': '{0} must be a list'.format(self.ignore_files)
}
def _set_args(self):
""" Set instance variables based on the arguments that were passed
"""
self.VALID_DIR_ARGUMENTS = [
'dir', 'depth', 'files_matching', 'ignore_files'
]
self.VALID_FILE_ARGUMENTS = ['file', '_raw_params']
self.GLOBAL_FILE_ARGUMENTS = ['name']
self.VALID_ARGUMENTS = (
self.VALID_DIR_ARGUMENTS + self.VALID_FILE_ARGUMENTS +
self.GLOBAL_FILE_ARGUMENTS
)
for arg in self._task.args:
if arg not in self.VALID_ARGUMENTS:
                err_msg = '{0} is not a valid option for this action'.format(arg)
raise AnsibleError(err_msg)
self.return_results_as_name = self._task.args.get('name', None)
self.source_dir = self._task.args.get('dir', None)
self.source_file = self._task.args.get('file', None)
if not self.source_dir and not self.source_file:
self.source_file = self._task.args.get('_raw_params')
self.depth = self._task.args.get('depth', None)
self.files_matching = self._task.args.get('files_matching', None)
self.ignore_files = self._task.args.get('ignore_files', None)
self._mutually_exclusive()
def run(self, tmp=None, task_vars=None):
""" Load yml files recursively from a directory.
"""
self.VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
if not task_vars:
task_vars = dict()
self.show_content = True
self._set_args()
results = dict()
if self.source_dir:
self._set_dir_defaults()
self._set_root_dir()
if path.exists(self.source_dir):
for root_dir, filenames in self._traverse_dir_depth():
failed, err_msg, updated_results = (
self._load_files_in_dir(root_dir, filenames)
)
if not failed:
results.update(updated_results)
else:
break
else:
failed = True
err_msg = (
'{0} directory does not exist'.format(self.source_dir)
)
else:
try:
self.source_file = self._find_needle('vars', self.source_file)
failed, err_msg, updated_results = (
self._load_files(self.source_file)
)
if not failed:
results.update(updated_results)
except AnsibleError as e:
failed = True
err_msg = to_native(e)
if self.return_results_as_name:
scope = dict()
scope[self.return_results_as_name] = results
results = scope
result = super(ActionModule, self).run(tmp, task_vars)
if failed:
result['failed'] = failed
result['message'] = err_msg
result['ansible_facts'] = results
result['_ansible_no_log'] = not self.show_content
return result
def _set_root_dir(self):
if self._task._role:
if self.source_dir.split('/')[0] == 'vars':
path_to_use = (
path.join(self._task._role._role_path, self.source_dir)
)
if path.exists(path_to_use):
self.source_dir = path_to_use
else:
path_to_use = (
path.join(
self._task._role._role_path, 'vars', self.source_dir
)
)
self.source_dir = path_to_use
else:
current_dir = (
"/".join(self._task._ds._data_source.split('/')[:-1])
)
self.source_dir = path.join(current_dir, self.source_dir)
def _traverse_dir_depth(self):
""" Recursively iterate over a directory and sort the files in
            alphabetical order. Do not iterate past the set depth.
The default depth is unlimited.
"""
current_depth = 0
sorted_walk = list(walk(self.source_dir))
sorted_walk.sort(key=lambda x: x[0])
for current_root, current_dir, current_files in sorted_walk:
current_depth += 1
if current_depth <= self.depth or self.depth == 0:
current_files.sort()
yield (current_root, current_files)
else:
break
def _ignore_file(self, filename):
""" Return True if a file matches the list of ignore_files.
Args:
filename (str): The filename that is being matched against.
Returns:
Boolean
"""
for file_type in self.ignore_files:
try:
if re.search(r'{0}$'.format(file_type), filename):
return True
except Exception:
err_msg = 'Invalid regular expression: {0}'.format(file_type)
raise AnsibleError(err_msg)
return False
def _is_valid_file_ext(self, source_file):
""" Verify if source file has a valid extension
Args:
source_file (str): The full path of source file or source file.
Returns:
Bool
"""
        file_ext = source_file.split('.')
        return file_ext[-1] in self.VALID_FILE_EXTENSIONS
def _load_files(self, filename, validate_extensions=False):
""" Loads a file and converts the output into a valid Python dict.
Args:
filename (str): The source file.
Returns:
Tuple (bool, str, dict)
"""
results = dict()
failed = False
err_msg = ''
if validate_extensions and not self._is_valid_file_ext(filename):
failed = True
err_msg = (
'{0} does not have a valid extension: {1}'
.format(filename, ', '.join(self.VALID_FILE_EXTENSIONS))
)
return failed, err_msg, results
b_data, show_content = self._loader._get_file_contents(filename)
data = to_text(b_data, errors='surrogate_or_strict')
self.show_content = show_content
data = self._loader.load(data, show_content)
if not data:
data = dict()
if not isinstance(data, dict):
failed = True
err_msg = (
'{0} must be stored as a dictionary/hash'
.format(filename)
)
else:
results.update(data)
return failed, err_msg, results
def _load_files_in_dir(self, root_dir, var_files):
""" Load the found yml files and update/overwrite the dictionary.
Args:
root_dir (str): The base directory of the list of files that is being passed.
var_files: (list): List of files to iterate over and load into a dictionary.
Returns:
Tuple (bool, str, dict)
"""
results = dict()
failed = False
err_msg = ''
for filename in var_files:
stop_iter = False
# Never include main.yml from a role, as that is the default included by the role
if self._task._role:
if filename == 'main.yml':
stop_iter = True
continue
filepath = path.join(root_dir, filename)
if self.files_matching:
if not self.matcher.search(filename):
stop_iter = True
if not stop_iter and not failed:
if path.exists(filepath) and not self._ignore_file(filename):
failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
if not failed:
results.update(loaded_data)
return failed, err_msg, results
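# Illustrative playbook usage (a sketch, not taken from the plugin's docs):
# the task keys below mirror the VALID_*_ARGUMENTS lists validated above, and
# the 'include_vars' module name is assumed from context.
#
#   - name: Load matching var files from a directory, two levels deep
#     include_vars:
#       dir: 'vars'
#       depth: 2
#       files_matching: '^web_.*\.yml$'
#       ignore_files: ['secrets.yml']
#       name: 'web_vars'
#
#   - name: Load a single file
#     include_vars:
#       file: 'common.yml'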
|
"""Http related parsers and protocol."""
import collections
import functools
import http.server
import itertools
import re
import string
import sys
import zlib
from wsgiref.handlers import format_date_time
import aiohttp
from . import errors, hdrs
from .multidict import CIMultiDict
from .log import internal_logger
__all__ = ['HttpMessage', 'Request', 'Response',
'HttpVersion', 'HttpVersion10', 'HttpVersion11',
'RawRequestMessage', 'RawResponseMessage',
'HttpPrefixParser', 'HttpRequestParser', 'HttpResponseParser',
'HttpPayloadParser']
ASCIISET = set(string.printable)
METHRE = re.compile('[A-Z0-9$-_.]+')
VERSRE = re.compile(r'HTTP/(\d+)\.(\d+)')
HDRRE = re.compile('[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\\\"]')
CONTINUATION = (' ', '\t')
EOF_MARKER = object()
EOL_MARKER = object()
RESPONSES = http.server.BaseHTTPRequestHandler.responses
HttpVersion = collections.namedtuple(
'HttpVersion', ['major', 'minor'])
HttpVersion10 = HttpVersion(1, 0)
HttpVersion11 = HttpVersion(1, 1)
RawRequestMessage = collections.namedtuple(
'RawRequestMessage',
['method', 'path', 'version', 'headers', 'should_close', 'compression'])
RawResponseMessage = collections.namedtuple(
'RawResponseMessage',
['version', 'code', 'reason', 'headers', 'should_close', 'compression'])
class HttpParser:
def __init__(self, max_line_size=8190, max_headers=32768,
max_field_size=8190):
self.max_line_size = max_line_size
self.max_headers = max_headers
self.max_field_size = max_field_size
def parse_headers(self, lines):
"""Parses RFC2822 headers from a stream.
Line continuations are supported. Returns list of header name
and value pairs. Header name is in upper case.
"""
close_conn = None
encoding = None
headers = CIMultiDict()
lines_idx = 1
line = lines[1]
while line:
header_length = len(line)
# Parse initial header name : value pair.
try:
name, value = line.split(':', 1)
except ValueError:
raise errors.InvalidHeader(line) from None
name = name.strip(' \t').upper()
if HDRRE.search(name):
raise errors.InvalidHeader(name)
# next line
lines_idx += 1
line = lines[lines_idx]
# consume continuation lines
continuation = line and line[0] in CONTINUATION
if continuation:
value = [value]
while continuation:
header_length += len(line)
if header_length > self.max_field_size:
raise errors.LineTooLong(
'limit request headers fields size')
value.append(line)
# next line
lines_idx += 1
line = lines[lines_idx]
continuation = line[0] in CONTINUATION
value = '\r\n'.join(value)
else:
if header_length > self.max_field_size:
raise errors.LineTooLong(
'limit request headers fields size')
value = value.strip()
# keep-alive and encoding
if name == hdrs.CONNECTION:
v = value.lower()
if v == 'close':
close_conn = True
elif v == 'keep-alive':
close_conn = False
elif name == hdrs.CONTENT_ENCODING:
enc = value.lower()
if enc in ('gzip', 'deflate'):
encoding = enc
headers.add(name, value)
return headers, close_conn, encoding
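# Illustrative sketch (not part of the original module): what parse_headers()
# consumes and returns.  The input is the '\r\n'-split message, where index 0
# is the request/status line and an empty string terminates the header block:
#
#   parser = HttpParser()
#   headers, close_conn, encoding = parser.parse_headers(
#       ['GET / HTTP/1.1',
#        'Host: example.com',
#        'Connection: keep-alive',
#        'Content-Encoding: gzip',
#        ''])
#   # headers -> CIMultiDict with upper-cased names
#   # close_conn -> False (keep-alive), encoding -> 'gzip'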
class HttpPrefixParser:
"""Waits for 'HTTP' prefix (non destructive)"""
def __init__(self, allowed_methods=()):
self.allowed_methods = [m.upper() for m in allowed_methods]
def __call__(self, out, buf):
raw_data = yield from buf.waituntil(b' ', 24)
method = raw_data.decode('ascii', 'surrogateescape').strip()
# method
method = method.upper()
if not METHRE.match(method):
raise errors.BadStatusLine(method)
# allowed method
if self.allowed_methods and method not in self.allowed_methods:
raise errors.HttpMethodNotAllowed(message=method)
out.feed_data(method)
out.feed_eof()
class HttpRequestParser(HttpParser):
"""Read request status line. Exception errors.BadStatusLine
could be raised in case of any errors in status line.
Returns RawRequestMessage.
"""
def __call__(self, out, buf):
# read http message (request line + headers)
try:
raw_data = yield from buf.readuntil(
b'\r\n\r\n', self.max_headers)
except errors.LineLimitExceededParserError as exc:
raise errors.LineTooLong(exc.limit) from None
lines = raw_data.decode(
'utf-8', 'surrogateescape').split('\r\n')
# request line
line = lines[0]
try:
method, path, version = line.split(None, 2)
except ValueError:
raise errors.BadStatusLine(line) from None
# method
method = method.upper()
if not METHRE.match(method):
raise errors.BadStatusLine(method)
# version
match = VERSRE.match(version)
if match is None:
raise errors.BadStatusLine(version)
version = HttpVersion(int(match.group(1)), int(match.group(2)))
# read headers
headers, close, compression = self.parse_headers(lines)
if version <= HttpVersion10:
close = True
elif close is None:
close = False
out.feed_data(
RawRequestMessage(
method, path, version, headers, close, compression))
out.feed_eof()
class HttpResponseParser(HttpParser):
"""Read response status line and headers.
BadStatusLine could be raised in case of any errors in status line.
Returns RawResponseMessage"""
def __call__(self, out, buf):
# read http message (response line + headers)
try:
raw_data = yield from buf.readuntil(
b'\r\n\r\n', self.max_line_size + self.max_headers)
except errors.LineLimitExceededParserError as exc:
raise errors.LineTooLong(exc.limit) from None
lines = raw_data.decode(
'utf-8', 'surrogateescape').split('\r\n')
line = lines[0]
try:
version, status = line.split(None, 1)
except ValueError:
raise errors.BadStatusLine(line) from None
else:
try:
status, reason = status.split(None, 1)
except ValueError:
reason = ''
# version
match = VERSRE.match(version)
if match is None:
raise errors.BadStatusLine(line)
version = HttpVersion(int(match.group(1)), int(match.group(2)))
# The status code is a three-digit number
try:
status = int(status)
except ValueError:
raise errors.BadStatusLine(line) from None
if status < 100 or status > 999:
raise errors.BadStatusLine(line)
# read headers
headers, close, compression = self.parse_headers(lines)
if close is None:
close = version <= HttpVersion10
out.feed_data(
RawResponseMessage(
version, status, reason.strip(),
headers, close, compression))
out.feed_eof()
class HttpPayloadParser:
def __init__(self, message, length=None, compression=True,
readall=False, response_with_body=True):
self.message = message
self.length = length
self.compression = compression
self.readall = readall
self.response_with_body = response_with_body
def __call__(self, out, buf):
# payload params
length = self.message.headers.get(hdrs.CONTENT_LENGTH, self.length)
if hdrs.SEC_WEBSOCKET_KEY1 in self.message.headers:
length = 8
# payload decompression wrapper
if self.compression and self.message.compression:
out = DeflateBuffer(out, self.message.compression)
# payload parser
if not self.response_with_body:
# don't parse payload if it's not expected to be received
pass
elif 'chunked' in self.message.headers.get(
hdrs.TRANSFER_ENCODING, ''):
yield from self.parse_chunked_payload(out, buf)
elif length is not None:
try:
length = int(length)
except ValueError:
raise errors.InvalidHeader(hdrs.CONTENT_LENGTH) from None
if length < 0:
raise errors.InvalidHeader(hdrs.CONTENT_LENGTH)
elif length > 0:
yield from self.parse_length_payload(out, buf, length)
else:
if self.readall and getattr(self.message, 'code', 0) != 204:
yield from self.parse_eof_payload(out, buf)
elif getattr(self.message, 'method', None) in ('PUT', 'POST'):
internal_logger.warning( # pragma: no cover
'Content-Length or Transfer-Encoding header is required')
out.feed_eof()
def parse_chunked_payload(self, out, buf):
"""Chunked transfer encoding parser."""
while True:
# read next chunk size
line = yield from buf.readuntil(b'\r\n', 8192)
i = line.find(b';')
if i >= 0:
line = line[:i] # strip chunk-extensions
else:
line = line.strip()
try:
size = int(line, 16)
except ValueError:
raise errors.TransferEncodingError(line) from None
if size == 0: # eof marker
break
# read chunk and feed buffer
while size:
chunk = yield from buf.readsome(size)
out.feed_data(chunk)
size = size - len(chunk)
# toss the CRLF at the end of the chunk
yield from buf.skip(2)
# read and discard trailer up to the CRLF terminator
yield from buf.skipuntil(b'\r\n')
def parse_length_payload(self, out, buf, length=0):
"""Read specified amount of bytes."""
required = length
while required:
chunk = yield from buf.readsome(required)
out.feed_data(chunk)
required -= len(chunk)
def parse_eof_payload(self, out, buf):
"""Read all bytes until eof."""
try:
while True:
out.feed_data((yield from buf.readsome()))
except aiohttp.EofStream:
pass
class DeflateBuffer:
"""DeflateStream decompress stream and feed data into specified stream."""
def __init__(self, out, encoding):
self.out = out
zlib_mode = (16 + zlib.MAX_WBITS
if encoding == 'gzip' else -zlib.MAX_WBITS)
self.zlib = zlib.decompressobj(wbits=zlib_mode)
def feed_data(self, chunk):
try:
chunk = self.zlib.decompress(chunk)
except Exception:
raise errors.ContentEncodingError('deflate')
if chunk:
self.out.feed_data(chunk)
def feed_eof(self):
self.out.feed_data(self.zlib.flush())
if not self.zlib.eof:
raise errors.ContentEncodingError('deflate')
self.out.feed_eof()
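def _deflate_buffer_example():
    """Illustrative sketch (not part of the original module): feed a
    gzip-compressed payload through DeflateBuffer into a hypothetical
    in-memory sink; relies only on the zlib import above."""
    class _Sink:
        def __init__(self):
            self.chunks = []
            self.eof = False
        def feed_data(self, chunk):
            self.chunks.append(chunk)
        def feed_eof(self):
            self.eof = True
    comp = zlib.compressobj(wbits=16 + zlib.MAX_WBITS)  # gzip container
    payload = comp.compress(b'hello world') + comp.flush()
    sink = _Sink()
    buf = DeflateBuffer(sink, 'gzip')
    buf.feed_data(payload)
    buf.feed_eof()
    return b''.join(sink.chunks)  # -> b'hello world'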
def wrap_payload_filter(func):
"""Wraps payload filter and piped filters.
Filter is a generator that accepts arbitrary chunks of data,
modify data and emit new stream of data.
For example we have stream of chunks: ['1', '2', '3', '4', '5'],
we can apply chunking filter to this stream:
['1', '2', '3', '4', '5']
|
response.add_chunking_filter(2)
|
['12', '34', '5']
It is possible to use different filters at the same time.
    For example, to compress an incoming stream with 'deflate' encoding
    and then split the data into chunks of 8192 bytes:
>>> response.add_compression_filter('deflate')
>>> response.add_chunking_filter(8192)
Filters do not alter transfer encoding.
    A filter can receive two types of data: a bytes object or EOF_MARKER.
    1. If the filter receives a bytes object, it should process the data,
       yield the processed data and then yield the EOL_MARKER object.
    2. If the filter receives EOF_MARKER, it should yield the remaining
       (buffered) data and then yield EOF_MARKER.
"""
@functools.wraps(func)
def wrapper(self, *args, **kw):
new_filter = func(self, *args, **kw)
filter = self.filter
if filter is not None:
next(new_filter)
self.filter = filter_pipe(filter, new_filter)
else:
self.filter = new_filter
next(self.filter)
return wrapper
def filter_pipe(filter, filter2, *,
EOF_MARKER=EOF_MARKER, EOL_MARKER=EOL_MARKER):
"""Creates pipe between two filters.
filter_pipe() feeds first filter with incoming data and then
send yielded from first filter data into filter2, results of
filter2 are being emitted.
1. If filter_pipe receives bytes object, it sends it to the first filter.
2. Reads yielded values from the first filter until it receives
EOF_MARKER or EOL_MARKER.
3. Each of this values is being send to second filter.
4. Reads yielded values from second filter until it receives EOF_MARKER
or EOL_MARKER. Each of this values yields to writer.
"""
chunk = yield
while True:
eof = chunk is EOF_MARKER
chunk = filter.send(chunk)
while chunk is not EOL_MARKER:
chunk = filter2.send(chunk)
while chunk not in (EOF_MARKER, EOL_MARKER):
yield chunk
chunk = next(filter2)
if chunk is not EOF_MARKER:
if eof:
chunk = EOF_MARKER
else:
chunk = next(filter)
else:
break
chunk = yield EOL_MARKER
class HttpMessage:
"""HttpMessage allows to write headers and payload to a stream.
For example, lets say we want to read file then compress it with deflate
compression and then send it with chunked transfer encoding, code may look
like this:
>>> response = aiohttp.Response(transport, 200)
We have to use deflate compression first:
>>> response.add_compression_filter('deflate')
Then we want to split output stream into chunks of 1024 bytes size:
>>> response.add_chunking_filter(1024)
    We can add headers to the response with the add_headers() method.
    add_headers() does not send data to the transport; send_headers() sends
    the request/response line and then the headers:
>>> response.add_headers(
... ('Content-Disposition', 'attachment; filename="..."'))
>>> response.send_headers()
    Now we can use the chunked writer to write the stream to a network stream.
    The first call to the write() method sends the response status line and
    headers; add_header() and add_headers() are unavailable at this stage:
    >>> with open('...', 'rb') as fp:
... chunk = fp.read(8192)
... while chunk:
... response.write(chunk)
... chunk = fp.read(8192)
>>> response.write_eof()
"""
writer = None
    # 'filter' is used for altering write() behaviour:
    # add_compression_filter adds deflate/gzip compression and
    # add_chunking_filter splits incoming data into chunks.
filter = None
HOP_HEADERS = None # Must be set by subclass.
SERVER_SOFTWARE = 'Python/{0[0]}.{0[1]} aiohttp/{1}'.format(
sys.version_info, aiohttp.__version__)
status = None
status_line = b''
upgrade = False # Connection: UPGRADE
websocket = False # Upgrade: WEBSOCKET
has_chunked_hdr = False # Transfer-encoding: chunked
# subclass can enable auto sending headers with write() call,
# this is useful for wsgi's start_response implementation.
_send_headers = False
def __init__(self, transport, version, close):
self.transport = transport
self.version = version
self.closing = close
# disable keep-alive for http/1.0
if version <= HttpVersion10:
self.keepalive = False
else:
self.keepalive = None
self.chunked = False
self.length = None
self.headers = CIMultiDict()
self.headers_sent = False
self.output_length = 0
self._output_size = 0
def force_close(self):
self.closing = True
self.keepalive = False
def enable_chunked_encoding(self):
self.chunked = True
def keep_alive(self):
if self.keepalive is None:
return not self.closing
else:
return self.keepalive
def is_headers_sent(self):
return self.headers_sent
def add_header(self, name, value):
"""Analyze headers. Calculate content length,
removes hop headers, etc."""
assert not self.headers_sent, 'headers have been sent already'
assert isinstance(name, str), \
'Header name should be a string, got {!r}'.format(name)
assert set(name).issubset(ASCIISET), \
'Header name should contain ASCII chars, got {!r}'.format(name)
assert isinstance(value, str), \
'Header {!r} should have string value, got {!r}'.format(
name, value)
name = name.strip().upper()
value = value.strip()
if name == hdrs.CONTENT_LENGTH:
self.length = int(value)
if name == hdrs.TRANSFER_ENCODING:
self.has_chunked_hdr = value.lower().strip() == 'chunked'
if name == hdrs.CONNECTION:
val = value.lower()
# handle websocket
if 'upgrade' in val:
self.upgrade = True
# connection keep-alive
elif 'close' in val:
self.keepalive = False
elif 'keep-alive' in val and self.version >= HttpVersion11:
self.keepalive = True
elif name == hdrs.UPGRADE:
if 'websocket' in value.lower():
self.websocket = True
self.headers[name] = value
elif name not in self.HOP_HEADERS:
# ignore hop-by-hop headers
self.headers.add(name, value)
def add_headers(self, *headers):
"""Adds headers to a http message."""
for name, value in headers:
self.add_header(name, value)
def send_headers(self):
"""Writes headers to a stream. Constructs payload writer."""
        # Chunked response is only for HTTP/1.1 clients or newer
        # and only when no Content-Length header is set.
# Do not use chunked responses when the response is guaranteed to
# not have a response body (304, 204).
assert not self.headers_sent, 'headers have been sent already'
self.headers_sent = True
if self.chunked or (self.length is None and
self.version >= HttpVersion11 and
self.status not in (304, 204)):
self.writer = self._write_chunked_payload()
self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'
elif self.length is not None:
self.writer = self._write_length_payload(self.length)
else:
self.writer = self._write_eof_payload()
next(self.writer)
self._add_default_headers()
# status + headers
headers = ''.join(itertools.chain(
(self.status_line,),
*((k, ': ', v, '\r\n')
for k, v in ((k, value)
for k, value in self.headers.items()))))
headers = headers.encode('utf-8') + b'\r\n'
self.output_length += len(headers)
self.transport.write(headers)
def _add_default_headers(self):
# set the connection header
if self.upgrade:
connection = 'upgrade'
elif not self.closing if self.keepalive is None else self.keepalive:
connection = 'keep-alive'
else:
connection = 'close'
self.headers[hdrs.CONNECTION] = connection
def write(self, chunk, *, EOF_MARKER=EOF_MARKER, EOL_MARKER=EOL_MARKER):
"""Writes chunk of data to a stream by using different writers.
writer uses filter to modify chunk of data.
write_eof() indicates end of stream.
writer can't be used after write_eof() method being called.
write() return drain future.
"""
assert (isinstance(chunk, (bytes, bytearray)) or
chunk is EOF_MARKER), chunk
size = self.output_length
if self._send_headers and not self.headers_sent:
self.send_headers()
assert self.writer is not None, 'send_headers() is not called.'
if self.filter:
chunk = self.filter.send(chunk)
while chunk not in (EOF_MARKER, EOL_MARKER):
self.writer.send(chunk)
chunk = next(self.filter)
else:
if chunk is not EOF_MARKER:
self.writer.send(chunk)
self._output_size += self.output_length - size
if self._output_size > 64 * 1024:
self._output_size = 0
return self.transport.drain()
else:
return ()
def write_eof(self):
self.write(EOF_MARKER)
try:
self.writer.throw(aiohttp.EofStream())
except StopIteration:
pass
return self.transport.drain()
def _write_chunked_payload(self):
"""Write data in chunked transfer encoding."""
while True:
try:
chunk = yield
except aiohttp.EofStream:
self.transport.write(b'0\r\n\r\n')
self.output_length += 5
break
chunk = bytes(chunk)
chunk_len = '{:x}\r\n'.format(len(chunk)).encode('ascii')
self.transport.write(chunk_len)
self.transport.write(chunk)
self.transport.write(b'\r\n')
self.output_length += len(chunk_len) + len(chunk) + 2
def _write_length_payload(self, length):
"""Write specified number of bytes to a stream."""
while True:
try:
chunk = yield
except aiohttp.EofStream:
break
if length:
l = len(chunk)
if length >= l:
self.transport.write(chunk)
self.output_length += len(chunk)
else:
self.transport.write(chunk[:length])
self.output_length += length
length = max(0, length-l)
def _write_eof_payload(self):
while True:
try:
chunk = yield
except aiohttp.EofStream:
break
self.transport.write(chunk)
self.output_length += len(chunk)
@wrap_payload_filter
def add_chunking_filter(self, chunk_size=16*1024, *,
EOF_MARKER=EOF_MARKER, EOL_MARKER=EOL_MARKER):
"""Split incoming stream into chunks."""
buf = bytearray()
chunk = yield
while True:
if chunk is EOF_MARKER:
if buf:
yield buf
yield EOF_MARKER
else:
buf.extend(chunk)
while len(buf) >= chunk_size:
chunk = bytes(buf[:chunk_size])
del buf[:chunk_size]
yield chunk
chunk = yield EOL_MARKER
@wrap_payload_filter
def add_compression_filter(self, encoding='deflate', *,
EOF_MARKER=EOF_MARKER, EOL_MARKER=EOL_MARKER):
"""Compress incoming stream with deflate or gzip encoding."""
zlib_mode = (16 + zlib.MAX_WBITS
if encoding == 'gzip' else -zlib.MAX_WBITS)
zcomp = zlib.compressobj(wbits=zlib_mode)
chunk = yield
while True:
if chunk is EOF_MARKER:
yield zcomp.flush()
chunk = yield EOF_MARKER
else:
yield zcomp.compress(chunk)
chunk = yield EOL_MARKER
class Response(HttpMessage):
"""Create http response message.
Transport is a socket stream transport. status is a response status code,
status has to be integer value. http_version is a tuple that represents
http version, (1, 0) stands for HTTP/1.0 and (1, 1) is for HTTP/1.1
"""
HOP_HEADERS = ()
@staticmethod
def calc_reason(status):
record = RESPONSES.get(status)
if record is not None:
reason = record[0]
else:
reason = str(status)
return reason
def __init__(self, transport, status,
http_version=HttpVersion11, close=False, reason=None):
super().__init__(transport, http_version, close)
self.status = status
if reason is None:
reason = self.calc_reason(status)
self.reason = reason
self.status_line = 'HTTP/{}.{} {} {}\r\n'.format(
http_version[0], http_version[1], status, reason)
def _add_default_headers(self):
super()._add_default_headers()
if hdrs.DATE not in self.headers:
# format_date_time(None) is quite expensive
self.headers.setdefault(hdrs.DATE, format_date_time(None))
self.headers.setdefault(hdrs.SERVER, self.SERVER_SOFTWARE)
class Request(HttpMessage):
HOP_HEADERS = ()
def __init__(self, transport, method, path,
http_version=HttpVersion11, close=False):
super().__init__(transport, http_version, close)
self.method = method
self.path = path
self.status_line = '{0} {1} HTTP/{2[0]}.{2[1]}\r\n'.format(
method, path, http_version)
def _add_default_headers(self):
super()._add_default_headers()
self.headers.setdefault(hdrs.USER_AGENT, self.SERVER_SOFTWARE)
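def _response_example():
    """Illustrative sketch (not part of the original module): drive a Response
    through a hypothetical in-memory transport.  _FakeTransport is a stand-in
    for a real asyncio transport and implements only what HttpMessage uses."""
    class _FakeTransport:
        def __init__(self):
            self.buffer = b''
        def write(self, data):
            self.buffer += data
        def drain(self):
            return ()
    transport = _FakeTransport()
    resp = Response(transport, 200, http_version=HttpVersion11)
    resp.add_header('Content-Length', '5')
    resp.send_headers()   # writes the status line and headers
    resp.write(b'hello')  # body bytes, counted against Content-Length
    resp.write_eof()
    return transport.buffer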
|
import importlib
import logging
import sys
from unittest.mock import MagicMock, patch, PropertyMock
import pytest
from tools.base import runner
# this is necessary to fix coverage as these libs are imported before pytest
# is invoked
importlib.reload(runner)
class DummyRunner(runner.Runner):
def __init__(self):
self.args = PropertyMock()
class DummyForkingRunner(runner.ForkingRunner):
def __init__(self):
self.args = PropertyMock()
def test_runner_constructor():
run = runner.Runner("path1", "path2", "path3")
assert run._args == ("path1", "path2", "path3")
def test_runner_args():
run = runner.Runner("path1", "path2", "path3")
parser_mock = patch(
"tools.base.runner.Runner.parser",
new_callable=PropertyMock)
with parser_mock as m_parser:
assert run.args == m_parser.return_value.parse_known_args.return_value.__getitem__.return_value
assert (
list(m_parser.return_value.parse_known_args.call_args)
== [(('path1', 'path2', 'path3'),), {}])
assert (
list(m_parser.return_value.parse_known_args.return_value.__getitem__.call_args)
== [(0,), {}])
assert "args" in run.__dict__
def test_runner_extra_args():
run = runner.Runner("path1", "path2", "path3")
parser_mock = patch(
"tools.base.runner.Runner.parser",
new_callable=PropertyMock)
with parser_mock as m_parser:
assert run.extra_args == m_parser.return_value.parse_known_args.return_value.__getitem__.return_value
assert (
list(m_parser.return_value.parse_known_args.call_args)
== [(('path1', 'path2', 'path3'),), {}])
assert (
list(m_parser.return_value.parse_known_args.return_value.__getitem__.call_args)
== [(1,), {}])
assert "extra_args" in run.__dict__
def test_runner_log(patches):
run = runner.Runner("path1", "path2", "path3")
patched = patches(
"logging.getLogger",
"logging.StreamHandler",
"LogFilter",
("Runner.log_level", dict(new_callable=PropertyMock)),
("Runner.name", dict(new_callable=PropertyMock)),
prefix="tools.base.runner")
with patched as (m_logger, m_stream, m_filter, m_level, m_name):
loggers = (MagicMock(), MagicMock())
m_stream.side_effect = loggers
assert run.log == m_logger.return_value
assert (
list(m_logger.return_value.setLevel.call_args)
== [(m_level.return_value,), {}])
assert (
list(list(c) for c in m_stream.call_args_list)
== [[(sys.stdout,), {}],
[(sys.stderr,), {}]])
assert (
list(loggers[0].setLevel.call_args)
== [(logging.DEBUG,), {}])
assert (
list(loggers[0].addFilter.call_args)
== [(m_filter.return_value,), {}])
assert (
list(loggers[1].setLevel.call_args)
== [(logging.WARN,), {}])
assert (
list(list(c) for c in m_logger.return_value.addHandler.call_args_list)
== [[(loggers[0],), {}], [(loggers[1],), {}]])
assert "log" in run.__dict__
def test_runner_log_level(patches):
run = runner.Runner("path1", "path2", "path3")
patched = patches(
"dict",
("Runner.args", dict(new_callable=PropertyMock)),
prefix="tools.base.runner")
with patched as (m_dict, m_args):
assert run.log_level == m_dict.return_value.__getitem__.return_value
assert (
list(m_dict.call_args)
== [(runner.LOG_LEVELS, ), {}])
assert (
list(m_dict.return_value.__getitem__.call_args)
== [(m_args.return_value.log_level,), {}])
assert "log_level" in run.__dict__
def test_runner_name():
run = DummyRunner()
assert run.name == run.__class__.__name__
assert "name" not in run.__dict__
def test_runner_parser(patches):
run = runner.Runner("path1", "path2", "path3")
patched = patches(
"argparse.ArgumentParser",
"Runner.add_arguments",
prefix="tools.base.runner")
with patched as (m_parser, m_add_args):
assert run.parser == m_parser.return_value
assert (
list(m_parser.call_args)
== [(), {"allow_abbrev": False}])
assert (
list(m_add_args.call_args)
== [(m_parser.return_value,), {}])
assert "parser" in run.__dict__
def test_runner_path():
run = runner.Runner("path1", "path2", "path3")
cwd_mock = patch("tools.base.runner.os.getcwd")
with cwd_mock as m_cwd:
assert run.path == m_cwd.return_value
assert (
list(m_cwd.call_args)
== [(), {}])
def test_runner_add_arguments(patches):
run = runner.Runner("path1", "path2", "path3")
assert run.add_arguments("PARSER") is None
# LogFilter tests
@pytest.mark.parametrize("level", [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, None, "giraffe"])
def test_runner_log_filter(level):
logfilter = runner.LogFilter()
class DummyRecord(object):
levelno = level
if level in [logging.DEBUG, logging.INFO]:
assert logfilter.filter(DummyRecord())
else:
assert not logfilter.filter(DummyRecord())
# BazelAdapter tests
def test_bazeladapter_constructor():
run = DummyRunner()
adapter = runner.BazelAdapter(run)
assert adapter.context == run
@pytest.mark.parametrize("query_returns", [0, 1])
def test_bazeladapter_query(query_returns):
run = DummyForkingRunner()
adapter = runner.BazelAdapter(run)
fork_mock = patch("tools.base.runner.ForkingAdapter.subproc_run")
with fork_mock as m_fork:
m_fork.return_value.returncode = query_returns
if query_returns:
with pytest.raises(runner.BazelRunError) as result:
adapter.query("BAZEL QUERY")
else:
result = adapter.query("BAZEL QUERY")
assert (
list(m_fork.call_args)
== [(['bazel', 'query', "'BAZEL QUERY'"],), {}])
if query_returns:
assert result.errisinstance(runner.BazelRunError)
assert (
result.value.args
== (f"Bazel query failed: {m_fork.return_value}",))
assert not m_fork.return_value.stdout.decode.called
else:
assert (
result
== m_fork.return_value.stdout.decode.return_value.split.return_value)
assert (
list(m_fork.return_value.stdout.decode.call_args)
== [('utf-8',), {}])
assert (
list(m_fork.return_value.stdout.decode.return_value.split.call_args)
== [('\n',), {}])
@pytest.mark.parametrize("cwd", [None, "", "SOMEPATH"])
@pytest.mark.parametrize("raises", [None, True, False])
@pytest.mark.parametrize("capture_output", [None, True, False])
@pytest.mark.parametrize("run_returns", [0, 1])
@pytest.mark.parametrize("args", [(), ("foo",), ("foo", "bar")])
def test_bazeladapter_run(patches, run_returns, cwd, raises, args, capture_output):
run = DummyForkingRunner()
adapter = runner.BazelAdapter(run)
patched = patches(
"ForkingAdapter.subproc_run",
("ForkingRunner.path", dict(new_callable=PropertyMock)),
prefix="tools.base.runner")
adapter_args = ("BAZEL RUN",) + args
kwargs = {}
if raises is not None:
kwargs["raises"] = raises
if cwd is not None:
kwargs["cwd"] = cwd
if capture_output is not None:
kwargs["capture_output"] = capture_output
with patched as (m_fork, m_path):
m_fork.return_value.returncode = run_returns
if run_returns and (raises is not False):
with pytest.raises(runner.BazelRunError) as result:
adapter.run(*adapter_args, **kwargs)
else:
result = adapter.run(*adapter_args, **kwargs)
call_args = (("--",) + args) if args else args
bazel_args = ("bazel", "run", "BAZEL RUN") + call_args
bazel_kwargs = {}
bazel_kwargs["capture_output"] = (
True
if capture_output is True
else False)
bazel_kwargs["cwd"] = (
cwd
if cwd
else m_path.return_value)
assert (
list(m_fork.call_args)
== [(bazel_args,), bazel_kwargs])
if run_returns and (raises is not False):
assert result.errisinstance(runner.BazelRunError)
assert (
result.value.args
== (f"Bazel run failed: {m_fork.return_value}",))
else:
assert result == m_fork.return_value
# ForkingAdapter tests
def test_forkingadapter_constructor():
run = DummyRunner()
adapter = runner.ForkingAdapter(run)
assert adapter.context == run
def test_forkingadapter_call():
run = DummyRunner()
adapter = runner.ForkingAdapter(run)
fork_mock = patch("tools.base.runner.ForkingAdapter.subproc_run")
with fork_mock as m_fork:
assert (
adapter(
"arg1", "arg2", "arg3",
kwa1="foo",
kwa2="bar",
kwa3="baz")
== m_fork.return_value)
assert (
list(m_fork.call_args)
== [('arg1', 'arg2', 'arg3'),
{'kwa1': 'foo', 'kwa2': 'bar', 'kwa3': 'baz'}])
@pytest.mark.parametrize("args", [(), ("a", "b")])
@pytest.mark.parametrize("cwd", [None, "NONE", "PATH"])
@pytest.mark.parametrize("capture_output", ["NONE", True, False])
def test_forkingadapter_subproc_run(patches, args, cwd, capture_output):
adapter = runner.ForkingAdapter(DummyRunner())
patched = patches(
"subprocess.run",
("Runner.path", dict(new_callable=PropertyMock)),
prefix="tools.base.runner")
with patched as (m_run, m_path):
kwargs = {}
if cwd != "NONE":
kwargs["cwd"] = cwd
if capture_output != "NONE":
kwargs["capture_output"] = capture_output
assert adapter.subproc_run(*args, **kwargs) == m_run.return_value
expected = {'capture_output': True, 'cwd': cwd}
if capture_output is False:
expected["capture_output"] = False
if cwd == "NONE":
expected["cwd"] = m_path.return_value
assert (
list(m_run.call_args)
== [args, expected])
# ForkingRunner tests
def test_forkingrunner_fork():
run = runner.ForkingRunner("path1", "path2", "path3")
forking_mock = patch("tools.base.runner.ForkingAdapter")
with forking_mock as m_fork:
assert run.subproc_run == m_fork.return_value
assert (
list(m_fork.call_args)
== [(run,), {}])
assert "subproc_run" in run.__dict__
# BazelRunner tests
def test_bazelrunner_bazel():
run = runner.BazelRunner("path1", "path2", "path3")
bazel_mock = patch("tools.base.runner.BazelAdapter")
with bazel_mock as m_bazel:
assert run.bazel == m_bazel.return_value
assert (
list(m_bazel.call_args)
== [(run,), {}])
assert "bazel" in run.__dict__
|
import sys
import unittest
from rope.base import exceptions
from rope.base.pycore import _TextChangeDetector
from rope.base.pyobjects import get_base_type, AbstractFunction
from ropetest import testutils
class PyCoreTest(unittest.TestCase):
def setUp(self):
super(PyCoreTest, self).setUp()
self.project = testutils.sample_project()
self.pycore = self.project.pycore
def tearDown(self):
testutils.remove_project(self.project)
super(PyCoreTest, self).tearDown()
def test_simple_module(self):
testutils.create_module(self.project, 'mod')
result = self.pycore.get_module('mod')
self.assertEquals(get_base_type('Module'), result.type)
self.assertEquals(0, len(result.get_attributes()))
def test_nested_modules(self):
pkg = testutils.create_package(self.project, 'pkg')
mod = testutils.create_module(self.project, 'mod', pkg)
package = self.pycore.get_module('pkg')
self.assertEquals(get_base_type('Module'), package.get_type())
self.assertEquals(1, len(package.get_attributes()))
module = package['mod'].get_object()
self.assertEquals(get_base_type('Module'), module.get_type())
def test_package(self):
pkg = testutils.create_package(self.project, 'pkg')
mod = testutils.create_module(self.project, 'mod', pkg)
result = self.pycore.get_module('pkg')
self.assertEquals(get_base_type('Module'), result.type)
def test_simple_class(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('class SampleClass(object):\n pass\n')
mod_element = self.pycore.get_module('mod')
result = mod_element['SampleClass'].get_object()
self.assertEquals(get_base_type('Type'), result.get_type())
def test_simple_function(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('def sample_function():\n pass\n')
mod_element = self.pycore.get_module('mod')
result = mod_element['sample_function'].get_object()
self.assertEquals(get_base_type('Function'), result.get_type())
def test_class_methods(self):
mod = testutils.create_module(self.project, 'mod')
code = 'class SampleClass(object):\n' \
' def sample_method(self):\n' \
' pass\n'
mod.write(code)
mod_element = self.pycore.get_module('mod')
sample_class = mod_element['SampleClass'].get_object()
self.assertTrue('sample_method' in sample_class)
method = sample_class['sample_method'].get_object()
self.assertEquals(get_base_type('Function'), method.get_type())
def test_global_variables(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('var = 10')
mod_element = self.pycore.get_module('mod')
result = mod_element['var']
def test_class_variables(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('class SampleClass(object):\n var = 10\n')
mod_element = self.pycore.get_module('mod')
sample_class = mod_element['SampleClass'].get_object()
var = sample_class['var']
def test_class_attributes_set_in_init(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('class C(object):\n'
' def __init__(self):\n self.var = 20\n')
mod_element = self.pycore.get_module('mod')
sample_class = mod_element['C'].get_object()
var = sample_class['var']
def test_class_attributes_set_in_init_overwriting_a_defined(self):
mod = testutils.create_module(self.project, 'mod')
code = 'class C(object):\n' \
' def __init__(self):\n' \
' self.f = 20\n' \
' def f():\n' \
' pass\n'
mod.write(code)
mod_element = self.pycore.get_module('mod')
sample_class = mod_element['C'].get_object()
f = sample_class['f'].get_object()
self.assertTrue(isinstance(f, AbstractFunction))
def test_classes_inside_other_classes(self):
mod = testutils.create_module(self.project, 'mod')
code = 'class SampleClass(object):\n' \
' class InnerClass(object):\n' \
' pass\n\n'
mod.write(code)
mod_element = self.pycore.get_module('mod')
sample_class = mod_element['SampleClass'].get_object()
var = sample_class['InnerClass'].get_object()
self.assertEquals(get_base_type('Type'), var.get_type())
@testutils.assert_raises(exceptions.ModuleNotFoundError)
def test_non_existent_module(self):
self.pycore.get_module('doesnotexistmodule')
def test_imported_names(self):
testutils.create_module(self.project, 'mod1')
mod = testutils.create_module(self.project, 'mod2')
mod.write('import mod1\n')
module = self.pycore.get_module('mod2')
imported_sys = module['mod1'].get_object()
self.assertEquals(get_base_type('Module'), imported_sys.get_type())
def test_imported_as_names(self):
testutils.create_module(self.project, 'mod1')
mod = testutils.create_module(self.project, 'mod2')
mod.write('import mod1 as my_import\n')
module = self.pycore.get_module('mod2')
imported_mod = module['my_import'].get_object()
self.assertEquals(get_base_type('Module'), imported_mod.get_type())
def test_get_string_module(self):
mod = self.pycore.get_string_module('class Sample(object):\n pass\n')
sample_class = mod['Sample'].get_object()
self.assertEquals(get_base_type('Type'), sample_class.get_type())
def test_get_string_module_with_extra_spaces(self):
mod = self.pycore.get_string_module('a = 10\n ')
def test_parameter_info_for_functions(self):
code = 'def func(param1, param2=10, *param3, **param4):\n pass'
mod = self.pycore.get_string_module(code)
sample_function = mod['func']
self.assertEquals(['param1', 'param2', 'param3', 'param4'],
sample_function.get_object().get_param_names())
# FIXME: Not found modules
def xxx_test_not_found_module_is_module(self):
mod = self.pycore.get_string_module('import doesnotexist\n')
self.assertEquals(get_base_type('Module'),
mod['doesnotexist'].
get_object().get_type())
def test_mixing_scopes_and_objects_hierarchy(self):
mod = self.pycore.get_string_module('var = 200\n')
scope = mod.get_scope()
self.assertTrue('var' in scope.get_names())
def test_inheriting_base_class_attributes(self):
code = 'class Base(object):\n' \
' def method(self):\n' \
' pass\n' \
'class Derived(Base):\n' \
' pass\n'
mod = self.pycore.get_string_module(code)
derived = mod['Derived'].get_object()
self.assertTrue('method' in derived)
self.assertEquals(get_base_type('Function'),
derived['method'].get_object().get_type())
def test_inheriting_multiple_base_class_attributes(self):
code = 'class Base1(object):\n def method1(self):\n pass\n' \
'class Base2(object):\n def method2(self):\n pass\n' \
'class Derived(Base1, Base2):\n pass\n'
mod = self.pycore.get_string_module(code)
derived = mod['Derived'].get_object()
self.assertTrue('method1' in derived)
self.assertTrue('method2' in derived)
def test_inheriting_multiple_base_class_attributes_with_the_same_name(self):
code = 'class Base1(object):\n def method(self):\n pass\n' \
'class Base2(object):\n def method(self):\n pass\n' \
'class Derived(Base1, Base2):\n pass\n'
mod = self.pycore.get_string_module(code)
base1 = mod['Base1'].get_object()
derived = mod['Derived'].get_object()
self.assertEquals(base1['method'].get_object(),
derived['method'].get_object())
def test_inheriting_unknown_base_class(self):
code = 'class Derived(NotFound):\n' \
' def f(self):\n' \
' pass\n'
mod = self.pycore.get_string_module(code)
derived = mod['Derived'].get_object()
self.assertTrue('f' in derived)
def test_module_creation(self):
new_module = testutils.create_module(self.project, 'module')
self.assertFalse(new_module.is_folder())
self.assertEquals(self.project.get_resource('module.py'), new_module)
def test_packaged_module_creation(self):
package = self.project.root.create_folder('package')
new_module = testutils.create_module(self.project, 'package.module')
self.assertEquals(self.project.get_resource('package/module.py'), new_module)
def test_packaged_module_creation_with_nested_src(self):
src = self.project.root.create_folder('src')
package = src.create_folder('pkg')
new_module = testutils.create_module(self.project, 'pkg.mod', src)
self.assertEquals(self.project.get_resource('src/pkg/mod.py'), new_module)
def test_package_creation(self):
new_package = testutils.create_package(self.project, 'pkg')
self.assertTrue(new_package.is_folder())
self.assertEquals(self.project.get_resource('pkg'), new_package)
self.assertEquals(self.project.get_resource('pkg/__init__.py'),
                          new_package.get_child('__init__.py'))
def test_nested_package_creation(self):
package = testutils.create_package(self.project, 'pkg1')
nested_package = testutils.create_package(self.project, 'pkg1.pkg2')
self.assertEquals(self.project.get_resource('pkg1/pkg2'), nested_package)
def test_packaged_package_creation_with_nested_src(self):
src = self.project.root.create_folder('src')
package = testutils.create_package(self.project, 'pkg1', src)
nested_package = testutils.create_package(self.project, 'pkg1.pkg2', src)
self.assertEquals(self.project.get_resource('src/pkg1/pkg2'), nested_package)
def test_find_module(self):
src = self.project.root.create_folder('src')
samplemod = testutils.create_module(self.project, 'samplemod', src)
found_module = self.pycore.find_module('samplemod')
self.assertEquals(samplemod, found_module)
def test_find_nested_module(self):
src = self.project.root.create_folder('src')
samplepkg = testutils.create_package(self.project, 'samplepkg', src)
samplemod = testutils.create_module(self.project, 'samplemod', samplepkg)
found_module = self.pycore.find_module('samplepkg.samplemod')
self.assertEquals(samplemod, found_module)
def test_find_multiple_module(self):
src = self.project.root.create_folder('src')
samplemod1 = testutils.create_module(self.project, 'samplemod', src)
samplemod2 = testutils.create_module(self.project, 'samplemod')
test = self.project.root.create_folder('test')
samplemod3 = testutils.create_module(self.project, 'samplemod', test)
found_module = self.pycore.find_module('samplemod')
self.assertTrue(samplemod1 == found_module or
samplemod2 == found_module or
samplemod3 == found_module)
def test_find_module_packages(self):
src = self.project.root
samplepkg = testutils.create_package(self.project, 'samplepkg', src)
found_module = self.pycore.find_module('samplepkg')
self.assertEquals(samplepkg, found_module)
def test_find_module_when_module_and_package_with_the_same_name(self):
src = self.project.root
samplemod = testutils.create_module(self.project, 'sample', src)
samplepkg = testutils.create_package(self.project, 'sample', src)
found_module = self.pycore.find_module('sample')
self.assertEquals(samplepkg, found_module)
def test_source_folders_preference(self):
pkg1 = testutils.create_package(self.project, 'pkg1')
src2 = testutils.create_package(self.project, 'pkg1.src2')
lost = testutils.create_module(self.project, 'pkg1.src2.lost')
self.assertEqual(self.project.pycore.find_module('lost'), None)
self.project.close()
from rope.base.project import Project
self.project = Project(self.project.address,
source_folders=['pkg1/src2'])
self.assertEqual(self.project.pycore.find_module('lost'), lost)
def test_getting_empty_source_folders(self):
self.assertEquals([], self.pycore.get_source_folders())
def test_root_source_folder(self):
self.project.root.create_file('sample.py')
source_folders = self.pycore.get_source_folders()
self.assertEquals(1, len(source_folders))
self.assertTrue(self.project.root in source_folders)
def test_root_source_folder2(self):
self.project.root.create_file('mod1.py')
self.project.root.create_file('mod2.py')
source_folders = self.pycore.get_source_folders()
self.assertEquals(1, len(source_folders))
self.assertTrue(self.project.root in source_folders)
def test_src_source_folder(self):
src = self.project.root.create_folder('src')
src.create_file('sample.py')
source_folders = self.pycore.get_source_folders()
self.assertEquals(1, len(source_folders))
self.assertTrue(self.project.get_resource('src') in source_folders)
def test_packages(self):
src = self.project.root.create_folder('src')
pkg = src.create_folder('package')
pkg.create_file('__init__.py')
source_folders = self.pycore.get_source_folders()
self.assertEquals(1, len(source_folders))
self.assertTrue(src in source_folders)
def test_multi_source_folders(self):
src = self.project.root.create_folder('src')
package = src.create_folder('package')
package.create_file('__init__.py')
test = self.project.root.create_folder('test')
test.create_file('alltests.py')
source_folders = self.pycore.get_source_folders()
self.assertEquals(2, len(source_folders))
self.assertTrue(src in source_folders)
self.assertTrue(test in source_folders)
def test_multi_source_folders2(self):
mod1 = testutils.create_module(self.project, 'mod1')
src = self.project.root.create_folder('src')
package = testutils.create_package(self.project, 'package', src)
mod2 = testutils.create_module(self.project, 'mod2', package)
source_folders = self.pycore.get_source_folders()
self.assertEquals(2, len(source_folders))
self.assertTrue(self.project.root in source_folders and \
src in source_folders)
def test_get_pyname_definition_location(self):
mod = self.pycore.get_string_module('a_var = 20\n')
a_var = mod['a_var']
self.assertEquals((mod, 1), a_var.get_definition_location())
def test_get_pyname_definition_location_functions(self):
mod = self.pycore.get_string_module('def a_func():\n pass\n')
a_func = mod['a_func']
self.assertEquals((mod, 1), a_func.get_definition_location())
def test_get_pyname_definition_location_class(self):
code = 'class AClass(object):\n pass\n\n'
mod = self.pycore.get_string_module(code)
a_class = mod['AClass']
self.assertEquals((mod, 1), a_class.get_definition_location())
def test_get_pyname_definition_location_local_variables(self):
mod = self.pycore.get_string_module('def a_func():\n a_var = 10\n')
a_func_scope = mod.get_scope().get_scopes()[0]
a_var = a_func_scope['a_var']
self.assertEquals((mod, 2), a_var.get_definition_location())
def test_get_pyname_definition_location_reassigning(self):
mod = self.pycore.get_string_module('a_var = 20\na_var=30\n')
a_var = mod['a_var']
self.assertEquals((mod, 1), a_var.get_definition_location())
def test_get_pyname_definition_location_importes(self):
module = testutils.create_module(self.project, 'mod')
mod = self.pycore.get_string_module('import mod\n')
imported_module = self.pycore.get_module('mod')
module_pyname = mod['mod']
self.assertEquals((imported_module, 1),
module_pyname.get_definition_location())
def test_get_pyname_definition_location_imports(self):
module_resource = testutils.create_module(self.project, 'mod')
module_resource.write('\ndef a_func():\n pass\n')
imported_module = self.pycore.get_module('mod')
mod = self.pycore.get_string_module('from mod import a_func\n')
a_func = mod['a_func']
self.assertEquals((imported_module, 2), a_func.get_definition_location())
def test_get_pyname_definition_location_parameters(self):
code = 'def a_func(param1, param2):\n a_var = param\n'
mod = self.pycore.get_string_module(code)
a_func_scope = mod.get_scope().get_scopes()[0]
param1 = a_func_scope['param1']
self.assertEquals((mod, 1), param1.get_definition_location())
param2 = a_func_scope['param2']
self.assertEquals((mod, 1), param2.get_definition_location())
def test_module_get_resource(self):
module_resource = testutils.create_module(self.project, 'mod')
module = self.pycore.get_module('mod')
self.assertEquals(module_resource, module.get_resource())
string_module = self.pycore.get_string_module('from mod import a_func\n')
self.assertEquals(None, string_module.get_resource())
def test_get_pyname_definition_location_class2(self):
code = 'class AClass(object):\n' \
' def __init__(self):\n' \
' self.an_attr = 10\n'
mod = self.pycore.get_string_module(code)
a_class = mod['AClass'].get_object()
an_attr = a_class['an_attr']
self.assertEquals((mod, 3), an_attr.get_definition_location())
def test_import_not_found_module_get_definition_location(self):
mod = self.pycore.get_string_module('import doesnotexist\n')
does_not_exist = mod['doesnotexist']
self.assertEquals((None, None), does_not_exist.get_definition_location())
def test_from_not_found_module_get_definition_location(self):
mod = self.pycore.get_string_module('from doesnotexist import Sample\n')
sample = mod['Sample']
self.assertEquals((None, None), sample.get_definition_location())
def test_from_package_import_module_get_definition_location(self):
pkg = testutils.create_package(self.project, 'pkg')
testutils.create_module(self.project, 'mod', pkg)
pkg_mod = self.pycore.get_module('pkg.mod')
mod = self.pycore.get_string_module('from pkg import mod\n')
imported_mod = mod['mod']
self.assertEquals((pkg_mod, 1),
imported_mod.get_definition_location())
def test_get_module_for_defined_pyobjects(self):
mod = self.pycore.get_string_module('class AClass(object):\n pass\n')
a_class = mod['AClass'].get_object()
self.assertEquals(mod, a_class.get_module())
def test_get_definition_location_for_packages(self):
pkg = testutils.create_package(self.project, 'pkg')
init_module = self.pycore.get_module('pkg.__init__')
mod = self.pycore.get_string_module('import pkg\n')
pkg_pyname = mod['pkg']
self.assertEquals((init_module, 1), pkg_pyname.get_definition_location())
def test_get_definition_location_for_filtered_packages(self):
pkg = testutils.create_package(self.project, 'pkg')
testutils.create_module(self.project, 'mod', pkg)
init_module = self.pycore.get_module('pkg.__init__')
mod = self.pycore.get_string_module('import pkg.mod')
pkg_pyname = mod['pkg']
self.assertEquals((init_module, 1), pkg_pyname.get_definition_location())
def test_out_of_project_modules(self):
scope = self.pycore.get_string_scope('import rope.base.project as project\n')
imported_module = scope['project'].get_object()
self.assertTrue('Project' in imported_module)
def test_file_encoding_reading(self):
contents = u'# -*- coding: utf-8 -*-\n#\N{LATIN SMALL LETTER I WITH DIAERESIS}\n'
mod = testutils.create_module(self.project, 'mod')
mod.write(contents)
self.pycore.get_module('mod')
def test_global_keyword(self):
contents = 'a_var = 1\ndef a_func():\n global a_var\n'
mod = self.pycore.get_string_module(contents)
global_var = mod['a_var']
func_scope = mod['a_func'].get_object().get_scope()
local_var = func_scope['a_var']
self.assertEquals(global_var, local_var)
def test_not_leaking_for_vars_inside_parent_scope(self):
mod = testutils.create_module(self.project, 'mod')
code = 'class C(object):\n' \
' def f(self):\n' \
' for my_var1, my_var2 in []:\n' \
' pass\n'
mod.write(code)
pymod = self.pycore.resource_to_pyobject(mod)
c_class = pymod['C'].get_object()
self.assertFalse('my_var1' in c_class)
self.assertFalse('my_var2' in c_class)
def test_not_leaking_for_vars_inside_parent_scope2(self):
mod = testutils.create_module(self.project, 'mod')
code = 'class C(object):\n' \
' def f(self):\n' \
' for my_var in []:\n' \
' pass\n'
mod.write(code)
pymod = self.pycore.resource_to_pyobject(mod)
c_class = pymod['C'].get_object()
self.assertFalse('my_var' in c_class)
def test_variables_defined_in_excepts(self):
mod = testutils.create_module(self.project, 'mod')
code = 'try:\n' \
' myvar1 = 1\n' \
'except:\n' \
' myvar2 = 1\n' \
'finally:\n' \
' myvar3 = 1\n'
mod.write(code)
pymod = self.pycore.resource_to_pyobject(mod)
self.assertTrue('myvar1' in pymod)
self.assertTrue('myvar2' in pymod)
self.assertTrue('myvar3' in pymod)
def test_not_leaking_tuple_assigned_names_inside_parent_scope(self):
mod = testutils.create_module(self.project, 'mod')
code = 'class C(object):\n' \
' def f(self):\n' \
' var1, var2 = range(2)\n'
mod.write(code)
pymod = self.pycore.resource_to_pyobject(mod)
c_class = pymod['C'].get_object()
self.assertFalse('var1' in c_class)
@testutils.run_only_for_25
def test_with_statement_variables(self):
code = 'import threading\nwith threading.lock() as var: pass\n'
if sys.version_info < (2, 6, 0):
code = 'from __future__ import with_statement\n' + code
pymod = self.pycore.get_string_module(code)
self.assertTrue('var' in pymod)
@testutils.run_only_for_25
def test_with_statement_variables_and_tuple_assignment(self):
code = 'class A(object):\n' \
' def __enter__(self):' \
' return (1, 2)\n' \
' def __exit__(self, type, value, tb):\n' \
' pass\n'\
'with A() as (a, b):\n' \
' pass\n'
if sys.version_info < (2, 6, 0):
code = 'from __future__ import with_statement\n' + code
pymod = self.pycore.get_string_module(code)
self.assertTrue('a' in pymod)
self.assertTrue('b' in pymod)
@testutils.run_only_for_25
def test_with_statement_variable_type(self):
code = 'class A(object):\n' \
' def __enter__(self):\n' \
' return self\n'\
' def __exit__(self, type, value, tb):\n' \
' pass\n' \
'with A() as var:\n' \
' pass\n'
if sys.version_info < (2, 6, 0):
code = 'from __future__ import with_statement\n' + code
pymod = self.pycore.get_string_module(code)
a_class = pymod['A'].get_object()
var = pymod['var'].get_object()
self.assertEquals(a_class, var.get_type())
@testutils.run_only_for_25
def test_with_statement_with_no_vars(self):
code = 'with open("file"): pass\n'
if sys.version_info < (2, 6, 0):
code = 'from __future__ import with_statement\n' + code
pymod = self.pycore.get_string_module(code)
pymod.get_attributes()
def test_check_for_else_block(self):
code = 'for i in range(10):\n' \
' pass\n' \
'else:\n' \
' myvar = 1\n'
mod = self.pycore.get_string_module(code)
a_var = mod['myvar']
self.assertEquals((mod, 4), a_var.get_definition_location())
def test_check_names_defined_in_whiles(self):
mod = self.pycore.get_string_module('while False:\n myvar = 1\n')
a_var = mod['myvar']
self.assertEquals((mod, 2), a_var.get_definition_location())
def test_get_definition_location_in_tuple_assnames(self):
mod = self.pycore.get_string_module(
'def f(x):\n x.z, a = range(2)\n')
x = mod['f'].get_object().get_scope()['x']
a = mod['f'].get_object().get_scope()['a']
self.assertEquals((mod, 1), x.get_definition_location())
self.assertEquals((mod, 2), a.get_definition_location())
@testutils.assert_raises(exceptions.ModuleSyntaxError)
def test_syntax_errors_in_code(self):
mod = self.pycore.get_string_module('xyx print\n')
def test_holding_error_location_information(self):
try:
mod = self.pycore.get_string_module('xyx print\n')
except exceptions.ModuleSyntaxError, e:
self.assertEquals(1, e.lineno)
def test_no_exceptions_on_module_encoding_problems(self):
mod = testutils.create_module(self.project, 'mod')
contents = '\nsdsdsd\n\xa9\n'
file = open(mod.real_path, 'wb')
file.write(contents)
file.close()
mod.read()
@testutils.assert_raises(exceptions.ModuleSyntaxError)
def test_syntax_errors_when_cannot_decode_file2(self):
mod = testutils.create_module(self.project, 'mod')
contents = '\n\xa9\n'
file = open(mod.real_path, 'wb')
file.write(contents)
file.close()
self.pycore.resource_to_pyobject(mod)
@testutils.assert_raises(exceptions.ModuleSyntaxError)
def test_syntax_errors_when_null_bytes(self):
mod = testutils.create_module(self.project, 'mod')
contents = '\n\x00\n'
file = open(mod.real_path, 'wb')
file.write(contents)
file.close()
self.pycore.resource_to_pyobject(mod)
@testutils.assert_raises(exceptions.ModuleSyntaxError)
def test_syntax_errors_when_bad_strs(self):
mod = testutils.create_module(self.project, 'mod')
contents = '\n"\\x0"\n'
file = open(mod.real_path, 'wb')
file.write(contents)
file.close()
self.pycore.resource_to_pyobject(mod)
def test_not_reaching_maximum_recursions_with_from_star_imports(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2')
mod1.write('from mod2 import *\n')
mod2.write('from mod1 import *\n')
pymod1 = self.pycore.resource_to_pyobject(mod1)
pymod1.get_attributes()
def test_not_reaching_maximum_recursions_when_importing_variables(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2')
mod1.write('from mod2 import myvar\n')
mod2.write('from mod1 import myvar\n')
pymod1 = self.pycore.resource_to_pyobject(mod1)
pymod1['myvar'].get_object()
def test_not_reaching_maximum_recursions_when_importing_variables2(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod1.write('from mod1 import myvar\n')
pymod1 = self.pycore.resource_to_pyobject(mod1)
pymod1['myvar'].get_object()
def test_pyobject_equality_should_compare_types(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod1.write('var1 = ""\nvar2 = ""\n')
pymod1 = self.pycore.resource_to_pyobject(mod1)
self.assertEquals(pymod1['var1'].get_object(),
pymod1['var2'].get_object())
class PyCoreInProjectsTest(unittest.TestCase):
def setUp(self):
super(self.__class__, self).setUp()
self.project = testutils.sample_project()
self.pycore = self.project.pycore
samplemod = testutils.create_module(self.project, 'samplemod')
code = 'class SampleClass(object):\n' \
' def sample_method():\n' \
' pass\n\n' \
'def sample_func():\n' \
' pass\n' \
'sample_var = 10\n\n' \
'def _underlined_func():\n' \
' pass\n\n'
samplemod.write(code)
package = testutils.create_package(self.project, 'package')
nestedmod = testutils.create_module(self.project, 'nestedmod', package)
def tearDown(self):
testutils.remove_project(self.project)
super(self.__class__, self).tearDown()
def test_simple_import(self):
mod = self.pycore.get_string_module('import samplemod\n')
samplemod = mod['samplemod'].get_object()
self.assertEquals(get_base_type('Module'), samplemod.get_type())
def test_from_import_class(self):
mod = self.pycore.get_string_module('from samplemod import SampleClass\n')
result = mod['SampleClass'].get_object()
self.assertEquals(get_base_type('Type'), result.get_type())
self.assertTrue('sample_func' not in mod.get_attributes())
def test_from_import_star(self):
mod = self.pycore.get_string_module('from samplemod import *\n')
self.assertEquals(get_base_type('Type'),
mod['SampleClass'].get_object().get_type())
self.assertEquals(get_base_type('Function'),
mod['sample_func'].get_object().get_type())
self.assertTrue(mod['sample_var'] is not None)
def test_from_import_star_overwriting(self):
code = 'from samplemod import *\n' \
'class SampleClass(object):\n pass\n'
mod = self.pycore.get_string_module(code)
samplemod = self.pycore.get_module('samplemod')
sample_class = samplemod['SampleClass'].get_object()
self.assertNotEquals(sample_class,
mod.get_attributes()['SampleClass'].get_object())
    def test_from_import_star_not_importing_underlined(self):
mod = self.pycore.get_string_module('from samplemod import *')
self.assertTrue('_underlined_func' not in mod.get_attributes())
def test_from_import_star_imports_in_functions(self):
mod = self.pycore.get_string_module('def f():\n from os import *\n')
mod['f'].get_object().get_scope().get_names()
def test_from_package_import_mod(self):
mod = self.pycore.get_string_module('from package import nestedmod\n')
self.assertEquals(get_base_type('Module'),
mod['nestedmod'].get_object().get_type())
    # XXX: Deciding to import everything on "import *" from packages
def xxx_test_from_package_import_star(self):
mod = self.pycore.get_string_module('from package import *\n')
self.assertTrue('nestedmod' not in mod.get_attributes())
def test_unknown_when_module_cannot_be_found(self):
mod = self.pycore.get_string_module('from doesnotexist import nestedmod\n')
self.assertTrue('nestedmod' in mod)
def test_from_import_function(self):
code = 'def f():\n from samplemod import SampleClass\n'
scope = self.pycore.get_string_scope(code)
self.assertEquals(get_base_type('Type'),
scope.get_scopes()[0]['SampleClass'].
get_object().get_type())
def test_circular_imports(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2')
mod1.write('import mod2\n')
mod2.write('import mod1\n')
module1 = self.pycore.get_module('mod1')
def test_circular_imports2(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2')
mod1.write('from mod2 import Sample2\nclass Sample1(object):\n pass\n')
mod2.write('from mod1 import Sample1\nclass Sample2(object):\n pass\n')
module1 = self.pycore.get_module('mod1').get_attributes()
def test_multi_dot_imports(self):
pkg = testutils.create_package(self.project, 'pkg')
pkg_mod = testutils.create_module(self.project, 'mod', pkg)
pkg_mod.write('def sample_func():\n pass\n')
mod = self.pycore.get_string_module('import pkg.mod\n')
self.assertTrue('pkg' in mod)
self.assertTrue('sample_func' in mod['pkg'].get_object()['mod'].
get_object())
def test_multi_dot_imports2(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod = self.pycore.get_string_module('import pkg.mod1\nimport pkg.mod2\n')
package = mod['pkg'].get_object()
self.assertEquals(2, len(package.get_attributes()))
self.assertTrue('mod1' in package and
'mod2' in package)
def test_multi_dot_imports3(self):
pkg1 = testutils.create_package(self.project, 'pkg1')
pkg2 = testutils.create_package(self.project, 'pkg2', pkg1)
mod1 = testutils.create_module(self.project, 'mod1', pkg2)
mod2 = testutils.create_module(self.project, 'mod2', pkg2)
code = 'import pkg1.pkg2.mod1\nimport pkg1.pkg2.mod2\n'
mod = self.pycore.get_string_module(code)
package1 = mod['pkg1'].get_object()
package2 = package1['pkg2'].get_object()
self.assertEquals(2, len(package2.get_attributes()))
self.assertTrue('mod1' in package2 and 'mod2' in package2)
def test_multi_dot_imports_as(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod1.write('def f():\n pass\n')
mod = self.pycore.get_string_module('import pkg.mod1 as mod1\n')
module = mod['mod1'].get_object()
self.assertTrue('f' in module)
# TODO: not showing unimported names as attributes of packages
def xxx_test_from_package_import_package(self):
pkg1 = testutils.create_package(self.project, 'pkg1')
pkg2 = testutils.create_package(self.project, 'pkg2', pkg1)
module = testutils.create_module(self.project, 'mod', pkg2)
mod = self.pycore.get_string_module('from pkg1 import pkg2\n')
package = mod['pkg2']
self.assertEquals(0, len(package.get_attributes()))
def test_invalidating_cache_after_resource_change(self):
module = testutils.create_module(self.project, 'mod')
module.write('import sys\n')
mod1 = self.pycore.get_module('mod')
self.assertTrue('var' not in mod1.get_attributes())
module.write('var = 10\n')
mod2 = self.pycore.get_module('mod')
self.assertTrue('var' in mod2)
def test_invalidating_cache_after_resource_change_for_init_dot_pys(self):
pkg = testutils.create_package(self.project, 'pkg')
mod = testutils.create_module(self.project, 'mod')
init_dot_py = pkg.get_child('__init__.py')
init_dot_py.write('a_var = 10\n')
mod.write('import pkg\n')
pymod = self.pycore.get_module('mod')
self.assertTrue('a_var' in pymod['pkg'].get_object())
init_dot_py.write('new_var = 10\n')
self.assertTrue('a_var' not in pymod['pkg'].get_object().get_attributes())
def test_invalidating_cache_after_resource_change_for_nested_init_dot_pys(self):
pkg1 = testutils.create_package(self.project, 'pkg1')
pkg2 = testutils.create_package(self.project, 'pkg2', pkg1)
mod = testutils.create_module(self.project, 'mod')
init_dot_py = pkg2.get_child('__init__.py')
init_dot_py.write('a_var = 10\n')
mod.write('import pkg1\n')
pymod = self.pycore.get_module('mod')
self.assertTrue('a_var' in pymod['pkg1'].get_object()['pkg2'].get_object())
init_dot_py.write('new_var = 10\n')
self.assertTrue('a_var' not in pymod['pkg1'].get_object()['pkg2'].get_object())
def test_from_import_nonexistent_module(self):
code = 'from doesnotexistmod import DoesNotExistClass\n'
mod = self.pycore.get_string_module(code)
self.assertTrue('DoesNotExistClass' in mod)
self.assertEquals(get_base_type('Unknown'),
mod['DoesNotExistClass'].
get_object().get_type())
def test_from_import_nonexistent_name(self):
code = 'from samplemod import DoesNotExistClass\n'
mod = self.pycore.get_string_module(code)
self.assertTrue('DoesNotExistClass' in mod)
self.assertEquals(get_base_type('Unknown'),
mod['DoesNotExistClass'].
get_object().get_type())
def test_not_considering_imported_names_as_sub_scopes(self):
code = 'from samplemod import SampleClass\n'
scope = self.pycore.get_string_scope(code)
self.assertEquals(0, len(scope.get_scopes()))
def test_not_considering_imported_modules_as_sub_scopes(self):
scope = self.pycore.get_string_scope('import samplemod\n')
self.assertEquals(0, len(scope.get_scopes()))
def test_inheriting_dotted_base_class(self):
code = 'import samplemod\n' \
'class Derived(samplemod.SampleClass):\n' \
' pass\n'
mod = self.pycore.get_string_module(code)
derived = mod['Derived'].get_object()
self.assertTrue('sample_method' in derived)
def test_self_in_methods(self):
code = 'class Sample(object):\n' \
' def func(self):\n' \
' pass\n'
scope = self.pycore.get_string_scope(code)
sample_class = scope['Sample'].get_object()
func_scope = scope.get_scopes()[0].get_scopes()[0]
self.assertEquals(sample_class,
func_scope['self'].get_object().get_type())
self.assertTrue('func' in func_scope['self'].
get_object())
def test_none_assignments_in_classes(self):
code = 'class C(object):\n' \
' var = ""\n' \
' def f(self):\n' \
' self.var += "".join([])\n'
scope = self.pycore.get_string_scope(
code)
c_class = scope['C'].get_object()
self.assertTrue('var' in c_class)
def test_self_in_methods_with_decorators(self):
code = 'class Sample(object):\n' \
' @staticmethod\n' \
' def func(self):\n' \
' pass\n'
scope = self.pycore.get_string_scope(code)
sample_class = scope['Sample'].get_object()
func_scope = scope.get_scopes()[0].get_scopes()[0]
self.assertNotEquals(sample_class,
func_scope['self'].get_object().get_type())
def test_location_of_imports_when_importing(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('from samplemod import SampleClass\n')
scope = self.pycore.get_string_scope('from mod import SampleClass\n')
sample_class = scope['SampleClass']
samplemod = self.pycore.get_module('samplemod')
self.assertEquals((samplemod, 1), sample_class.get_definition_location())
def test_nested_modules(self):
pkg = testutils.create_package(self.project, 'pkg')
mod = testutils.create_module(self.project, 'mod', pkg)
imported_module = self.pycore.get_module('pkg.mod')
scope = self.pycore.get_string_scope('import pkg.mod\n')
mod_pyobject = scope['pkg'].get_object()['mod']
self.assertEquals((imported_module, 1),
mod_pyobject.get_definition_location())
def test_reading_init_dot_py(self):
pkg = testutils.create_package(self.project, 'pkg')
init_dot_py = pkg.get_child('__init__.py')
init_dot_py.write('a_var = 1\n')
pkg_object = self.pycore.get_module('pkg')
self.assertTrue('a_var' in pkg_object)
def test_relative_imports(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod2.write('import mod1\n')
mod1_object = self.pycore.resource_to_pyobject(mod1)
mod2_object = self.pycore.resource_to_pyobject(mod2)
self.assertEquals(mod1_object, mod2_object.get_attributes()['mod1'].get_object())
def test_relative_froms(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod1.write('def a_func():\n pass\n')
mod2.write('from mod1 import a_func\n')
mod1_object = self.pycore.resource_to_pyobject(mod1)
mod2_object = self.pycore.resource_to_pyobject(mod2)
self.assertEquals(mod1_object['a_func'].get_object(),
mod2_object['a_func'].get_object())
def test_relative_imports_for_string_modules(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod2.write('import mod1\n')
mod1_object = self.pycore.resource_to_pyobject(mod1)
mod2_object = self.pycore.get_string_module(mod2.read(), mod2)
self.assertEquals(mod1_object, mod2_object['mod1'].get_object())
def test_relative_imports_for_string_scopes(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod2.write('import mod1\n')
mod1_object = self.pycore.resource_to_pyobject(mod1)
mod2_scope = self.pycore.get_string_scope(mod2.read(), mod2)
self.assertEquals(mod1_object, mod2_scope['mod1'].get_object())
@testutils.run_only_for_25
def test_new_style_relative_imports(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod2.write('from . import mod1\n')
mod1_object = self.pycore.resource_to_pyobject(mod1)
mod2_object = self.pycore.resource_to_pyobject(mod2)
self.assertEquals(mod1_object, mod2_object['mod1'].get_object())
@testutils.run_only_for_25
def test_new_style_relative_imports2(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod1.write('def a_func():\n pass\n')
mod2.write('from ..mod1 import a_func\n')
mod1_object = self.pycore.resource_to_pyobject(mod1)
mod2_object = self.pycore.resource_to_pyobject(mod2)
self.assertEquals(mod1_object['a_func'].get_object(),
mod2_object['a_func'].get_object())
def test_invalidating_cache_for_from_imports_after_resource_change(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2')
mod2.write('def a_func():\n print(1)\n')
mod1.write('from mod2 import a_func\na_func()\n')
pymod1 = self.pycore.get_module('mod1')
pymod2 = self.pycore.get_module('mod2')
self.assertEquals(pymod1['a_func'].get_object(),
pymod2['a_func'].get_object())
mod2.write(mod2.read() + '\n')
pymod2 = self.pycore.get_module('mod2')
self.assertEquals(pymod1['a_func'].get_object(),
pymod2['a_func'].get_object())
def test_invalidating_superclasses_after_change(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2')
mod1.write('class A(object):\n def func1(self):\n pass\n')
mod2.write('import mod1\nclass B(mod1.A):\n pass\n')
b_class = self.pycore.get_module('mod2')['B'].get_object()
self.assertTrue('func1' in b_class)
mod1.write('class A(object):\n def func2(self):\n pass\n')
self.assertTrue('func2' in b_class)
def test_caching_pymodule_with_syntax_errors(self):
self.project.prefs['ignore_syntax_errors'] = True
self.project.prefs['automatic_soa'] = True
self.project.pycore._init_automatic_soa()
source = 'import sys\nab cd'
mod = testutils.create_module(self.project, 'mod')
mod.write(source)
from rope.contrib import fixsyntax
fixer = fixsyntax.FixSyntax(self.project.pycore, source, mod, 10)
pymodule = fixer.get_pymodule()
self.assertTrue(pymodule.source_code.startswith('import sys\npass\n'))
class TextChangeDetectorTest(unittest.TestCase):
def test_trivial_case(self):
detector = _TextChangeDetector('\n', '\n')
self.assertFalse(detector.is_changed(1, 1))
def test_one_line_change(self):
detector = _TextChangeDetector('1\n2\n', '1\n3\n')
self.assertFalse(detector.is_changed(1, 1))
self.assertTrue(detector.is_changed(2, 2))
def test_line_expansion(self):
detector = _TextChangeDetector('1\n2\n', '1\n3\n4\n2\n')
self.assertFalse(detector.is_changed(1, 1))
self.assertFalse(detector.is_changed(2, 2))
def test_line_removals(self):
detector = _TextChangeDetector('1\n3\n4\n2\n', '1\n2\n')
self.assertFalse(detector.is_changed(1, 1))
self.assertTrue(detector.is_changed(2, 3))
self.assertFalse(detector.is_changed(4, 4))
def test_multi_line_checks(self):
detector = _TextChangeDetector('1\n2\n', '1\n3\n')
self.assertTrue(detector.is_changed(1, 2))
def test_consume_change(self):
detector = _TextChangeDetector('1\n2\n', '1\n3\n')
self.assertTrue(detector.is_changed(1, 2))
self.assertTrue(detector.consume_changes(1, 2))
self.assertFalse(detector.is_changed(1, 2))
class PyCoreProjectConfigsTest(unittest.TestCase):
def setUp(self):
super(PyCoreProjectConfigsTest, self).setUp()
self.project = None
def tearDown(self):
if self.project:
testutils.remove_project(self.project)
super(PyCoreProjectConfigsTest, self).tearDown()
def test_python_files_config(self):
self.project = testutils.sample_project(python_files=['myscript'])
myscript = self.project.root.create_file('myscript')
self.assertTrue(self.project.pycore.is_python_file(myscript))
def test_ignore_bad_imports(self):
self.project = testutils.sample_project(ignore_bad_imports=True)
pymod = self.project.pycore.get_string_module(
'import some_nonexistent_module\n')
self.assertFalse('some_nonexistent_module' in pymod)
def test_ignore_bad_imports_for_froms(self):
self.project = testutils.sample_project(ignore_bad_imports=True)
pymod = self.project.pycore.get_string_module(
'from some_nonexistent_module import var\n')
self.assertFalse('var' in pymod)
@testutils.assert_raises(exceptions.ModuleSyntaxError)
def test_reporting_syntax_errors_with_force_errors(self):
self.project = testutils.sample_project(ignore_syntax_errors=True)
mod = testutils.create_module(self.project, 'mod')
mod.write('syntax error ...\n')
self.project.pycore.resource_to_pyobject(mod, force_errors=True)
@testutils.assert_raises(exceptions.ModuleSyntaxError)
def test_reporting_syntax_errors_in_strings_with_force_errors(self):
self.project = testutils.sample_project(ignore_syntax_errors=True)
self.project.pycore.get_string_module('syntax error ...',
force_errors=True)
def test_not_raising_errors_for_strings_with_ignore_errors(self):
self.project = testutils.sample_project(ignore_syntax_errors=True)
self.project.pycore.get_string_module('syntax error ...')
@testutils.assert_raises(exceptions.ModuleSyntaxError)
def test_reporting_syntax_errors_with_force_errors_for_packages(self):
self.project = testutils.sample_project(ignore_syntax_errors=True)
pkg = testutils.create_package(self.project, 'pkg')
pkg.get_child('__init__.py').write('syntax error ...\n')
self.project.pycore.resource_to_pyobject(pkg, force_errors=True)
def suite():
result = unittest.TestSuite()
result.addTests(unittest.makeSuite(PyCoreTest))
result.addTests(unittest.makeSuite(PyCoreInProjectsTest))
result.addTests(unittest.makeSuite(TextChangeDetectorTest))
result.addTests(unittest.makeSuite(PyCoreProjectConfigsTest))
return result
if __name__ == '__main__':
unittest.main()
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initializes TPU system for TF 2.0."""
import REDACTED
import tensorflow as tf
def tpu_initialize(tpu_address, tpu_zone=None):
"""Initializes TPU for TF 2.0 training.
Args:
    tpu_address: string, BNS address of the master TPU worker.
    tpu_zone: optional string; the zone in which the TPU resides.
Returns:
A TPUClusterResolver.
"""
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=tpu_address, zone=tpu_zone)
if tpu_address not in ('', 'local'):
tf.config.experimental_connect_to_cluster(cluster_resolver)
tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
return cluster_resolver
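# Illustrative usage sketch (an added note, not part of the original module;
# kept as a comment so importing this file stays side-effect free). With the
# resolver returned above, a TF 2.0-era TPU strategy is typically built as:
#
#   resolver = tpu_initialize(tpu_address='my-tpu', tpu_zone='us-central1-b')
#   strategy = tf.distribute.experimental.TPUStrategy(resolver)
#   with strategy.scope():
#     model = tf.keras.Sequential([tf.keras.layers.Dense(1)])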
|
"""Global configuration of the Flask application"""
from __future__ import print_function
import os
import sys
def usage_warning(message):
"""Terminate application and display message if misconfigured."""
print(message)
sys.exit()
class DefaultConfig(object):
"""Configure default settings for all app environments."""
# Site title
SITE_TITLE = os.environ.get('SITE_TITLE') or usage_warning('Site title'
' not set.')
# Site domain anatomy and FQDN assembly
SITE_PROTOCOL = (os.environ.get('SITE_PROTOCOL') or
usage_warning('Site Protocol not set.'))
SITE_SUBDOMAIN = (os.environ.get('SITE_SUBDOMAIN') or
usage_warning('Subdomain not set.'))
SITE_DOMAIN = (os.environ.get('SITE_DOMAIN') or
usage_warning('Site domain not set.'))
SITE_TLD = os.environ.get('SITE_TLD') or usage_warning('Site TLD not set.')
SITE_FQDN = '{0}.{1}.{2}'.format(SITE_SUBDOMAIN, SITE_DOMAIN, SITE_TLD)
# Static file server domain anatomy and FQDN assembly
STATIC_ASSET_PROTOCOL = (os.environ.get('STATIC_ASSET_PROTOCOL') or
usage_warning('Site Protocol not set.'))
STATIC_ASSET_SUBDOMAIN = (os.environ.get('STATIC_ASSET_SUBDOMAIN') or
                              usage_warning('Subdomain not set.'))
STATIC_ASSET_DOMAIN = (os.environ.get('STATIC_ASSET_DOMAIN') or
usage_warning('Site domain not set.'))
STATIC_ASSET_TLD = (os.environ.get('STATIC_ASSET_TLD') or
usage_warning('Site TLD not set.'))
STATIC_ASSET_FQDN = '{0}.{1}.{2}'.format(STATIC_ASSET_SUBDOMAIN,
STATIC_ASSET_DOMAIN,
STATIC_ASSET_TLD)
# Redirection domain anatomy and FQDN assembly
REDIRECTION_PROTOCOL = (os.environ.get('REDIRECTION_PROTOCOL') or
usage_warning('Site Protocol not set.'))
REDIRECTION_SUBDOMAIN = (os.environ.get('REDIRECTION_SUBDOMAIN') or
usage_warning('Subdomain not set.'))
REDIRECTION_DOMAIN = (os.environ.get('REDIRECTION_DOMAIN') or
usage_warning('Site domain not set.'))
REDIRECTION_TLD = (os.environ.get('REDIRECTION_TLD') or
usage_warning('Site TLD not set.'))
REDIRECTION_FQDN = '{0}.{1}.{2}'.format(REDIRECTION_SUBDOMAIN,
REDIRECTION_DOMAIN,
REDIRECTION_TLD)
# Default "FROM" E-mail address
ADMIN_EMAIL_SENDER = '{0} <noreply@{1}.{2}>'.format(SITE_TITLE,
SITE_DOMAIN,
SITE_TLD)
# Used for hashing and encryption
SECRET_KEY = (os.environ.get('SECRET_KEY') or
usage_warning('SECRET_KEY not configured.'))
# Called from create_app() after specific configuration is selected
@staticmethod
def init_app(app):
"""Leave me here. Essential to bootstrap."""
pass
class DevelopmentConfig(DefaultConfig):
"""Configure dev environment."""
DEBUG = True
SQLALCHEMY_DATABASE_URI = (os.environ.get('DEV_DATABASE_URL') or
usage_warning('DEV database URL not '
'configured.'))
class TestingConfig(DefaultConfig):
"""Configure testing environment."""
TESTING = True
SQLALCHEMY_DATABASE_URI = (os.environ.get('TEST_DATABASE_URL') or
usage_warning('TEST database URL not'
' configured.'))
class StagingConfig(DefaultConfig):
"""Configure staging environment."""
STAGING = True
SQLALCHEMY_DATABASE_URI = (os.environ.get('STAGE_DATABASE_URL') or
usage_warning('STAGE database URL not '
'configured.'))
class ProductionConfig(DefaultConfig):
"""Configure production environment."""
PRODUCTION = True
SQLALCHEMY_DATABASE_URI = (os.environ.get('PROD_DATABASE_URL') or
                               usage_warning('PROD database URL not '
                                             'configured.'))
CONFIG = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'staging': StagingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
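# Illustrative usage sketch (an added note; assumes this module is importable
# as `config` and that the application factory mentioned in init_app() lives
# elsewhere, e.g. in app/__init__.py):
#
#   from flask import Flask
#   from config import CONFIG
#
#   def create_app(config_name='default'):
#       app = Flask(__name__)
#       app.config.from_object(CONFIG[config_name])
#       CONFIG[config_name].init_app(app)
#       return app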
|
# vim: et ts=4 sw=4
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
import datetime, re
# Create your models here.
class Posting(models.Model):
feed = models.ForeignKey('Feed')
origid = models.CharField(max_length=250,blank=True)
title = models.CharField(max_length=250)
link = models.URLField(blank=True)
content = models.TextField()
author = models.CharField(max_length=250, blank=True)
    publishdate = models.DateTimeField(default=datetime.datetime.now)  # pass the callable so the default is evaluated per save, not once at import
def isread(self):
return (self.marks.filter(mark='READ').count() > 0)
def isstarred(self):
return (self.marks.filter(mark='STAR').count() > 0)
def displaywide(self):
if self.feed.wide_allowed and not self.isread() and (len(re.sub('<[^>]+>', '', self.content)) > 100 or re.search('<img [^>]+>', self.content)):
return True
return False
class Feed(models.Model):
url = models.URLField()
title = models.CharField(max_length=250)
category = TreeForeignKey('Category', related_name="feeds", blank=True, null=True)
wide_allowed = models.BooleanField(default=False, verbose_name="Allow big posts to display big")
def __unicode__(self):
return self.title
def unread(self):
#print "Feed ({}).unread will give {}.".format(self, len(filter(lambda p: not p.isread(), self.posting_set.all())))
print "Feed ({}).unread will give {}.".format(self, self.posting_set.filter(~models.Q(marks__mark='READ')).count())
#return len(filter(lambda p: not p.isread(), self.posting_set.all()))
return self.posting_set.filter(~models.Q(marks__mark='READ')).count()
class Enclosure(models.Model):
posting = models.ForeignKey('Posting', related_name='enclosures')
etype = models.CharField(max_length=200)
length = models.IntegerField(default=-1)
href = models.URLField(blank=False)
class Category(MPTTModel):
title = models.CharField(max_length=100)
parent = TreeForeignKey('self', blank=True, null=True, related_name='subcategories')
class MPTTMeta:
order_insertion_by = 'title'
def __unicode__(self):
return self.title
def getfeeds(self):
""" Return the feeds that are direct or indirect childs of this category. """
feedlist = [ f for f in self.feeds.all() ]
for c in self.subcategories.all():
feedlist += [ f for f in c.getfeeds() ]
return feedlist
def unread(self):
u = 0
for c in self.get_children():
#print "Getting unread for ", c
u += c.unread()
for f in self.feeds.all():
u += f.unread()
#print " u is now ", u
return u
class PostMark(models.Model):
posting = models.ForeignKey(Posting, related_name='marks')
MARKS = (
('READ', 'Read'),
('STAR', 'Starred'),
)
mark = models.CharField(max_length=5, choices=MARKS)
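# Illustrative ORM sketch (an added note, not part of the original models):
# marking a posting as read and asking its feed for the remaining unread
# count, using the Posting/PostMark/Feed definitions above.
#
#   posting = Posting.objects.get(pk=1)
#   PostMark.objects.get_or_create(posting=posting, mark='READ')
#   print posting.feed.unread()   # Python 2 print, as used elsewhere here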
|
# $Id$
#
# Copyright (C)2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" functionality for drawing hierarchical catalogs on sping
canvases
"""
from sping import pid as piddle
class VisOpts(object):
circRad = 10
minCircRad = 4
maxCircRad = 16
circColor = piddle.Color(0.6, 0.6, 0.9)
terminalEmptyColor = piddle.Color(.8, .8, .2)
terminalOnColor = piddle.Color(0.8, 0.8, 0.8)
terminalOffColor = piddle.Color(0.2, 0.2, 0.2)
outlineColor = piddle.transparent
lineColor = piddle.Color(0, 0, 0)
lineWidth = 1
horizOffset = 5
vertOffset = 75
topMargin = 20
labelFont = piddle.Font(face='helvetica', size=10)
highlightColor = piddle.Color(1., 1., .4)
highlightWidth = 2
visOpts = VisOpts()
def GetMinCanvasSize(adjList, levelList):
maxAcross = -1
for k in levelList.keys():
nHere = len(levelList[k])
maxAcross = max(maxAcross, nHere)
nLevs = len(levelList.keys())
minSize = (maxAcross * (visOpts.minCircRad * 2 + visOpts.horizOffset),
visOpts.topMargin + nLevs * visOpts.vertOffset)
return minSize
def DrawHierarchy(adjList, levelList, canvas, entryColors=None, bitIds=None, minLevel=-1,
maxLevel=1e8):
"""
Arguments:
- adjList: adjacency list representation of the hierarchy to be drawn
- levelList: dictionary mapping level -> list of ids
"""
if bitIds is None:
bitIds = []
if entryColors is None:
entryColors = {}
levelLengths = levelList.keys()
levelLengths.sort()
minLevel = max(minLevel, levelLengths[0])
maxLevel = min(maxLevel, levelLengths[-1])
dims = canvas.size
drawLocs = {}
# start at the bottom of the hierarchy and work up:
for levelLen in range(maxLevel, minLevel - 1, -1):
nLevelsDown = levelLen - minLevel
pos = [0, visOpts.vertOffset * nLevelsDown + visOpts.topMargin]
ids = levelList.get(levelLen, [])
# FIX: we'll eventually want to figure out some kind of sorting here:
nHere = len(ids)
canvas.defaultFont = visOpts.labelFont
if nHere:
# figure the size of each node at this level:
spacePerNode = float(dims[0]) / nHere
spacePerNode -= visOpts.horizOffset
nodeRad = max(spacePerNode / 2, visOpts.minCircRad)
nodeRad = min(nodeRad, visOpts.maxCircRad)
spacePerNode = nodeRad * 2 + visOpts.horizOffset
# start in the midde of the canvas:
pos[0] = dims[0] / 2.
# maybe we need to offset a little:
if nHere % 2:
pos[0] -= spacePerNode / 2
# move to the left by half the number of nodes:
pos[0] -= (nHere // 2 - .5) * spacePerNode
# Find the locations and draw connectors:
for id in ids:
if not bitIds or id in bitIds:
# first do lines down to the next level:
if levelLen != maxLevel:
for neighbor in adjList[id]:
              if neighbor in drawLocs:
p2 = drawLocs[neighbor][0]
canvas.drawLine(pos[0], pos[1], p2[0], p2[1], visOpts.lineColor, visOpts.lineWidth)
drawLocs[id] = tuple(pos), nodeRad
pos[0] += spacePerNode
for id in drawLocs.keys():
pos, nodeRad = drawLocs[id]
x1, y1 = pos[0] - nodeRad, pos[1] - nodeRad
x2, y2 = pos[0] + nodeRad, pos[1] + nodeRad
drawColor = entryColors.get(id, visOpts.circColor)
canvas.drawEllipse(x1, y1, x2, y2, visOpts.outlineColor, 0, drawColor)
label = str(id)
#txtLoc = ( pos[0]-canvas.stringWidth(label)/2,
# pos[1]+canvas.fontHeight()/4 )
txtLoc = (pos[0] + canvas.fontHeight() / 4, pos[1] + canvas.stringWidth(label) / 2)
canvas.drawString(label, txtLoc[0], txtLoc[1], angle=90)
return drawLocs
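# Illustrative usage sketch (an added note; the canvas backend and its
# constructor arguments are assumptions -- any piddle/sping canvas with the
# usual drawing API should work):
#
#   from sping.PIL import PILCanvas
#   adjList = {0: [1, 2]}            # node 0 is the parent of nodes 1 and 2
#   levelList = {0: [0], 1: [1, 2]}  # level number -> ids on that level
#   canvas = PILCanvas(size=GetMinCanvasSize(adjList, levelList),
#                      name='hierarchy.png')
#   DrawHierarchy(adjList, levelList, canvas)
#   canvas.save()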
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with clusters
"""
import traceback
from nailgun.api.v1.handlers.base import BaseHandler
from nailgun.api.v1.handlers.base import DeferredTaskHandler
from nailgun.api.v1.handlers.base import DeploymentTasksHandler
from nailgun.api.v1.handlers.base import CollectionHandler
from nailgun.api.v1.handlers.base import SingleHandler
from nailgun import objects
from nailgun.api.v1.handlers.base import content
from nailgun.api.v1.validators.cluster import AttributesValidator
from nailgun.api.v1.validators.cluster import ClusterChangesValidator
from nailgun.api.v1.validators.cluster import ClusterValidator
from nailgun.api.v1.validators.cluster import VmwareAttributesValidator
from nailgun.logger import logger
from nailgun.task.manager import ApplyChangesTaskManager
from nailgun.task.manager import ClusterDeletionManager
from nailgun.task.manager import ResetEnvironmentTaskManager
from nailgun.task.manager import StopDeploymentTaskManager
from nailgun.task.manager import UpdateEnvironmentTaskManager
class ClusterHandler(SingleHandler):
"""Cluster single handler
"""
single = objects.Cluster
validator = ClusterValidator
@content
def DELETE(self, obj_id):
""":returns: {}
:http: * 202 (cluster deletion process launched)
* 400 (failed to execute cluster deletion process)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(self.single, obj_id)
task_manager = ClusterDeletionManager(cluster_id=cluster.id)
try:
logger.debug('Trying to execute cluster deletion task')
task_manager.execute()
except Exception as e:
            logger.warn('Error while executing '
                        'cluster deletion task: %s' % str(e))
logger.warn(traceback.format_exc())
raise self.http(400, str(e))
raise self.http(202, '{}')
class ClusterCollectionHandler(CollectionHandler):
"""Cluster collection handler
"""
collection = objects.ClusterCollection
validator = ClusterValidator
class ClusterChangesHandler(DeferredTaskHandler):
log_message = u"Trying to start deployment at environment '{env_id}'"
log_error = u"Error during execution of deployment " \
u"task on environment '{env_id}': {error}"
task_manager = ApplyChangesTaskManager
validator = ClusterChangesValidator
class ClusterStopDeploymentHandler(DeferredTaskHandler):
log_message = u"Trying to stop deployment on environment '{env_id}'"
log_error = u"Error during execution of deployment " \
u"stopping task on environment '{env_id}': {error}"
task_manager = StopDeploymentTaskManager
class ClusterResetHandler(DeferredTaskHandler):
log_message = u"Trying to reset environment '{env_id}'"
log_error = u"Error during execution of resetting task " \
u"on environment '{env_id}': {error}"
task_manager = ResetEnvironmentTaskManager
class ClusterUpdateHandler(DeferredTaskHandler):
log_message = u"Trying to update environment '{env_id}'"
log_error = u"Error during execution of update task " \
u"on environment '{env_id}': {error}"
task_manager = UpdateEnvironmentTaskManager
class ClusterAttributesHandler(BaseHandler):
"""Cluster attributes handler
"""
fields = (
"editable",
)
validator = AttributesValidator
@content
def GET(self, cluster_id):
""":returns: JSONized Cluster attributes.
:http: * 200 (OK)
* 404 (cluster not found in db)
* 500 (cluster has no attributes)
"""
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
if not cluster.attributes:
raise self.http(500, "No attributes found!")
return objects.Cluster.get_editable_attributes(cluster)
def PUT(self, cluster_id):
""":returns: JSONized Cluster attributes.
:http: * 200 (OK)
* 400 (wrong attributes data specified)
* 404 (cluster not found in db)
* 500 (cluster has no attributes)
"""
        # Because we don't support PATCH requests and we use PUT requests
        # for the same purpose with partial data, follow the DRY principle
        # and delegate to the PATCH handler for now.
        # In the future, PUT should overwrite the whole entity and PATCH
        # should change only parts of it.
return self.PATCH(cluster_id)
@content
def PATCH(self, cluster_id):
""":returns: JSONized Cluster attributes.
:http: * 200 (OK)
* 400 (wrong attributes data specified)
* 404 (cluster not found in db)
* 500 (cluster has no attributes)
"""
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
if not cluster.attributes:
raise self.http(500, "No attributes found!")
data = self.checked_data(cluster=cluster)
        # if the cluster is locked we have to check which attributes
        # are being changed and block the entire operation if there
        # is one with always_editable=False.
if cluster.is_locked:
attrs = objects.Cluster.get_editable_attributes(cluster)
editable = attrs['editable']
for group_name in data.get('editable', {}):
                # we need a bunch of gets because the attribute may not
                # even exist (the user may be adding a new one)
metadata = editable.get(group_name, {}).get('metadata', {})
if not metadata.get('always_editable'):
raise self.http(403, (
"Environment attribute '{0}' couldn't be changed "
"after or during deployment.".format(group_name)))
objects.Cluster.patch_attributes(cluster, data)
return objects.Cluster.get_editable_attributes(cluster)
class ClusterAttributesDefaultsHandler(BaseHandler):
"""Cluster default attributes handler
"""
fields = (
"editable",
)
@content
def GET(self, cluster_id):
""":returns: JSONized default Cluster attributes.
:http: * 200 (OK)
* 404 (cluster not found in db)
* 500 (cluster has no attributes)
"""
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
attrs = objects.Cluster.get_default_editable_attributes(cluster)
if not attrs:
raise self.http(500, "No attributes found!")
return {"editable": attrs}
@content
def PUT(self, cluster_id):
""":returns: JSONized Cluster attributes.
:http: * 200 (OK)
* 400 (wrong attributes data specified)
* 404 (cluster not found in db)
* 500 (cluster has no attributes)
"""
cluster = self.get_object_or_404(
objects.Cluster,
cluster_id,
log_404=(
"error",
"There is no cluster "
"with id '{0}' in DB.".format(cluster_id)
)
)
if not cluster.attributes:
logger.error('ClusterAttributesDefaultsHandler: no attributes'
' found for cluster_id %s' % cluster_id)
raise self.http(500, "No attributes found!")
cluster.attributes.editable = (
objects.Cluster.get_default_editable_attributes(cluster))
objects.Cluster.add_pending_changes(cluster, "attributes")
logger.debug('ClusterAttributesDefaultsHandler:'
' editable attributes for cluster_id %s were reset'
' to default' % cluster_id)
return {"editable": cluster.attributes.editable}
class ClusterGeneratedData(BaseHandler):
"""Cluster generated data
"""
@content
def GET(self, cluster_id):
""":returns: JSONized cluster generated data
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
return cluster.attributes.generated
class ClusterDeploymentTasksHandler(DeploymentTasksHandler):
"""Cluster Handler for deployment graph serialization."""
single = objects.Cluster
class VmwareAttributesHandler(BaseHandler):
"""Vmware attributes handler
"""
fields = (
"editable",
)
validator = VmwareAttributesValidator
@content
def GET(self, cluster_id):
""":returns: JSONized Cluster vmware attributes.
:http: * 200 (OK)
* 400 (cluster doesn't accept vmware configuration)
* 404 (cluster not found in db |
cluster has no vmware attributes)
"""
cluster = self.get_object_or_404(
objects.Cluster, cluster_id,
log_404=(
"error",
"There is no cluster "
"with id '{0}' in DB.".format(cluster_id)
)
)
if not objects.Cluster.is_vmware_enabled(cluster):
raise self.http(400, "Cluster doesn't support vmware "
"configuration")
attributes = objects.Cluster.get_vmware_attributes(cluster)
if not attributes:
raise self.http(404, "No vmware attributes found")
return self.render(attributes)
@content
def PUT(self, cluster_id):
""":returns: JSONized Cluster vmware attributes.
:http: * 200 (OK)
* 400 (wrong attributes data specified |
cluster doesn't accept vmware configuration)
               * 403 (attributes can't be changed)
* 404 (cluster not found in db |
cluster has no vmware attributes)
"""
cluster = self.get_object_or_404(
objects.Cluster, cluster_id,
log_404=(
"error",
"There is no cluster "
"with id '{0}' in DB.".format(cluster_id)
)
)
if not objects.Cluster.is_vmware_enabled(cluster):
raise self.http(400, "Cluster doesn't support vmware "
"configuration")
attributes = objects.Cluster.get_vmware_attributes(cluster)
if not attributes:
raise self.http(404, "No vmware attributes found")
if cluster.is_locked:
raise self.http(403, "Environment attributes can't be changed "
"after or during deployment.")
data = self.checked_data(instance=attributes)
attributes = objects.Cluster.update_vmware_attributes(cluster, data)
return {"editable": attributes}
class VmwareAttributesDefaultsHandler(BaseHandler):
"""Vmware default attributes handler
"""
@content
def GET(self, cluster_id):
""":returns: JSONized default Cluster vmware attributes.
:http: * 200 (OK)
* 400 (cluster doesn't accept vmware configuration)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(
objects.Cluster, cluster_id,
log_404=(
"error",
"There is no cluster "
"with id '{0}' in DB.".format(cluster_id)
)
)
if not objects.Cluster.is_vmware_enabled(cluster):
raise self.http(400, "Cluster doesn't support vmware "
"configuration")
attributes = objects.Cluster.get_default_vmware_attributes(cluster)
return {"editable": attributes}
|
import Queue
import signal
import threading
import time
class ThreadManager():
"""Knows how to manage dem threads"""
quit = False
quitting = False
threads = []
    def __init__(self, threads=None):
        """Store the worker threads and register signal handlers."""
        # avoid the shared-mutable-default-argument pitfall
        self.threads = threads if threads is not None else []
self.register_signal_handlers()
def register_signal_handlers(self):
# Register signal handler
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
def run(self):
# Main loop
self.start_threads()
while not self.quit:
time.sleep(1)
def start_threads(self):
for t in self.threads:
t.start()
def signal_handler(self, signal, frame):
""" Handle signals """
print("Caught CTRL+C / SIGKILL")
if not self.quitting:
self.quitting = True
self.stop_threads()
self.quit = True
else:
print("BE PATIENT!@#~!#!@#$~!`1111")
def stop_threads(self):
"""Stops all threads and waits for them to quit"""
print("Stopping threads")
for thread in self.threads:
thread.stop()
while threading.activeCount() > 1:
print("Waiting for %s threads" % threading.activeCount())
time.sleep(1)
print("All threads stopped")
|
from django import forms
from django.contrib import admin
from api.models import AuthEvent, UserData, ACL, User
from authmethods.models import Message, ColorList, Code, Connection
from authmethods import METHODS
from django.contrib.auth.admin import UserAdmin
# Register your models here.
class AuthEventAdminForm(forms.ModelForm):
class Meta:
model = AuthEvent
fields = ('auth_method', 'census', 'auth_method_config', 'extra_fields',
'status')
choices = []
for k in METHODS.keys():
choices.append((k, k + ': ' + METHODS.get(k).DESCRIPTION))
widgets = {
'auth_method':
forms.Select(attrs={'obj':'str'}, choices=choices),
}
class AuthEventAdmin(admin.ModelAdmin):
form = AuthEventAdminForm
list_display = ('id', 'auth_method', 'status')
list_filter = ('auth_method', 'status')
search_fields = ('id',)
class UserDataAdmin(admin.ModelAdmin):
list_display = ('user', 'status')
search_fields = ('user__username', 'status', 'metadata', 'user__email', 'tlf')
class ACLAdmin(admin.ModelAdmin):
list_display = ('user', 'perm', 'object_type', 'object_id')
list_filter = ('perm', 'object_type')
search_fields = ('user__user__username', 'user__user__email', 'user__metadata',
'perm', 'object_type', 'object_id')
class ColorListAdmin(admin.ModelAdmin):
pass
class MessageAdmin(admin.ModelAdmin):
pass
class CodeAdmin(admin.ModelAdmin):
list_display = ('auth_event_id', 'user', 'code', 'created')
date_hierarchy = 'created'
class ConnectionAdmin(admin.ModelAdmin):
pass
class UserDataInline(admin.StackedInline):
model = UserData
class CustomUserAdmin(UserAdmin):
def change_view(self, request, obj_id):
        # UserData has required fields and doesn't let us modify users in
        # the admin, so the inline stays disabled:
#self.inlines=[UserDataInline,]
return super(CustomUserAdmin, self).change_view(request, obj_id)
def add_view(self, request):
self.inlines=[]
return super(CustomUserAdmin, self).add_view(request)
admin.site.unregister(User)
admin.site.register(User, CustomUserAdmin)
admin.site.register(AuthEvent, AuthEventAdmin)
admin.site.register(UserData, UserDataAdmin)
admin.site.register(ACL, ACLAdmin)
admin.site.register(ColorList, ColorListAdmin)
admin.site.register(Message, MessageAdmin)
admin.site.register(Code, CodeAdmin)
admin.site.register(Connection, ConnectionAdmin)
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all line item creative associations (LICA) for a given
line item id. The statement retrieves up to the maximum page size limit of 500.
To create LICAs, run create_licas.py."""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
lica_service = client.GetService(
'LineItemCreativeAssociationService', version='v201308')
# Set the id of the line item to get LICAs by.
line_item_id = 'INSERT_LINE_ITEM_ID_HERE'
# Create statement object to only select LICAs for the given line item id.
values = [{
'key': 'lineItemId',
'value': {
'xsi_type': 'NumberValue',
'value': line_item_id
}
}]
filter_statement = {'query': 'WHERE lineItemId = :lineItemId LIMIT 500',
'values': values}
# Get LICAs by statement.
response = lica_service.GetLineItemCreativeAssociationsByStatement(
filter_statement)[0]
licas = []
if 'results' in response:
licas = response['results']
# Display results.
for lica in licas:
print ('LICA with line item id \'%s\', creative id \'%s\', and status '
'\'%s\' was found.' % (lica['lineItemId'], lica['creativeId'],
lica['status']))
print
print 'Number of results found: %s' % len(licas)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import pretend
from zope.interface.verify import verifyClass
from warehouse.metrics import services
from warehouse.metrics.interfaces import IMetricsService
from warehouse.metrics.services import NullMetrics, DataDogMetrics
class TestNullMetrics:
def test_verify_service(self):
assert verifyClass(IMetricsService, NullMetrics)
def test_create_service(self):
assert isinstance(
NullMetrics.create_service(pretend.stub(), pretend.stub()), NullMetrics
)
@pytest.mark.parametrize(
"method",
[
"gauge",
"increment",
"decrement",
"histogram",
"distribution",
"timing",
"set",
],
)
def test_noop(self, method):
metrics = NullMetrics()
getattr(metrics, method)("my metric", pretend.stub())
def test_timed(self):
metrics = NullMetrics()
@metrics.timed("my metric")
@pretend.call_recorder
def fn(inp):
return inp
result = pretend.stub()
assert fn(result) is result
assert fn.calls == [pretend.call(result)]
with metrics.timed("my metric"):
pass
def test_event(self):
metrics = NullMetrics()
metrics.event(pretend.stub(), pretend.stub(), pretend.stub())
def test_service_check(self):
metrics = NullMetrics()
metrics.service_check(pretend.stub(), pretend.stub())
class TestDataDogMetrics:
def test_verify_service(self):
assert verifyClass(IMetricsService, DataDogMetrics)
def test_create_service_defaults(self, monkeypatch):
datadog_obj = pretend.stub()
datadog_cls = pretend.call_recorder(lambda **kw: datadog_obj)
monkeypatch.setattr(services, "DogStatsd", datadog_cls)
context = pretend.stub()
request = pretend.stub(registry=pretend.stub(settings={}))
metrics = DataDogMetrics.create_service(context, request)
assert metrics._datadog is datadog_obj
assert datadog_cls.calls == [
pretend.call(host="127.0.0.1", port=8125, namespace=None, use_ms=True)
]
def test_create_service_overrides(self, monkeypatch):
datadog_obj = pretend.stub()
datadog_cls = pretend.call_recorder(lambda **kw: datadog_obj)
monkeypatch.setattr(services, "DogStatsd", datadog_cls)
context = pretend.stub()
request = pretend.stub(
registry=pretend.stub(
settings={
"metrics.host": "example.com",
"metrics.port": "9152",
"metrics.namespace": "thing",
}
)
)
metrics = DataDogMetrics.create_service(context, request)
assert metrics._datadog is datadog_obj
assert datadog_cls.calls == [
pretend.call(host="example.com", port=9152, namespace="thing", use_ms=True)
]
@pytest.mark.parametrize(
"method",
[
"gauge",
"increment",
"decrement",
"histogram",
"distribution",
"timing",
"set",
],
)
def test_dispatches_basic(self, method):
method_fn = pretend.call_recorder(lambda *a, **kw: None)
datadog = pretend.stub(**{method: method_fn})
metrics = DataDogMetrics(datadog)
getattr(metrics, method)("my metric", 3, tags=["foo", "bar"], sample_rate=0.5)
assert method_fn.calls == [
pretend.call("my metric", 3, tags=["foo", "bar"], sample_rate=0.5)
]
def test_dispatches_timed(self):
timer = pretend.stub()
datadog = pretend.stub(timed=pretend.call_recorder(lambda *a, **k: timer))
metrics = DataDogMetrics(datadog)
assert (
metrics.timed("thing.timed", tags=["wat"], sample_rate=0.4, use_ms=True)
is timer
)
assert datadog.timed.calls == [
pretend.call("thing.timed", tags=["wat"], sample_rate=0.4, use_ms=True)
]
def test_dispatches_event(self):
datadog = pretend.stub(event=pretend.call_recorder(lambda *a, **k: None))
metrics = DataDogMetrics(datadog)
metrics.event(
"my title",
"this is text",
alert_type="thing",
aggregation_key="wat",
source_type_name="ok?",
date_happened="now?",
priority="who knows",
tags=["one", "two"],
hostname="example.com",
)
assert datadog.event.calls == [
pretend.call(
"my title",
"this is text",
alert_type="thing",
aggregation_key="wat",
source_type_name="ok?",
date_happened="now?",
priority="who knows",
tags=["one", "two"],
hostname="example.com",
)
]
def test_dispatches_service_check(self):
datadog = pretend.stub(
service_check=pretend.call_recorder(lambda *a, **k: None)
)
metrics = DataDogMetrics(datadog)
metrics.service_check(
"name!",
"ok",
tags=["one", "two"],
timestamp="now",
hostname="example.com",
message="my message",
)
assert datadog.service_check.calls == [
pretend.call(
"name!",
"ok",
tags=["one", "two"],
timestamp="now",
hostname="example.com",
message="my message",
)
]
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PacketCaptureParameters(Model):
"""Parameters that define the create packet capture operation.
All required parameters must be populated in order to send to Azure.
:param target: Required. The ID of the targeted resource, only VM is
currently supported.
:type target: str
:param bytes_to_capture_per_packet: Number of bytes captured per packet,
the remaining bytes are truncated. Default value: 0 .
:type bytes_to_capture_per_packet: int
:param total_bytes_per_session: Maximum size of the capture output.
Default value: 1073741824 .
:type total_bytes_per_session: int
:param time_limit_in_seconds: Maximum duration of the capture session in
seconds. Default value: 18000 .
:type time_limit_in_seconds: int
:param storage_location: Required.
:type storage_location:
~azure.mgmt.network.v2017_06_01.models.PacketCaptureStorageLocation
:param filters:
:type filters:
list[~azure.mgmt.network.v2017_06_01.models.PacketCaptureFilter]
"""
_validation = {
'target': {'required': True},
'storage_location': {'required': True},
}
_attribute_map = {
'target': {'key': 'target', 'type': 'str'},
'bytes_to_capture_per_packet': {'key': 'bytesToCapturePerPacket', 'type': 'int'},
'total_bytes_per_session': {'key': 'totalBytesPerSession', 'type': 'int'},
'time_limit_in_seconds': {'key': 'timeLimitInSeconds', 'type': 'int'},
'storage_location': {'key': 'storageLocation', 'type': 'PacketCaptureStorageLocation'},
'filters': {'key': 'filters', 'type': '[PacketCaptureFilter]'},
}
def __init__(self, **kwargs):
super(PacketCaptureParameters, self).__init__(**kwargs)
self.target = kwargs.get('target', None)
self.bytes_to_capture_per_packet = kwargs.get('bytes_to_capture_per_packet', 0)
self.total_bytes_per_session = kwargs.get('total_bytes_per_session', 1073741824)
self.time_limit_in_seconds = kwargs.get('time_limit_in_seconds', 18000)
self.storage_location = kwargs.get('storage_location', None)
self.filters = kwargs.get('filters', None)
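# Hedged usage sketch (the resource ID and storage values below are
# illustrative assumptions, not values from this file): the generated model is
# populated purely via keyword arguments.
#
#   params = PacketCaptureParameters(
#       target="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<vm>",
#       storage_location=PacketCaptureStorageLocation(storage_path="https://<account>.blob.core.windows.net/captures/cap1.cap"),
#       time_limit_in_seconds=600,
#   )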
|
#!/usr/bin/python
# $Id: 20120809$
# $Date: 2012-08-09 16:38:17$
# $Author: Marek Lukaszuk$
# idea from http://pastebin.com/dSJbGSBD
shell = "/usr/bin/tcsh"
from time import time,sleep
from struct import pack,unpack
from hmac import HMAC
from hashlib import sha1
from base64 import b32decode,b32encode
from random import randint
from getpass import getpass
import os
import sys
class inputMismatchError(Exception):
    """Raised when the two compared values are not of the same type."""
    pass
def constantTimeCompare(a, b):
    # compare in constant time to avoid leaking information via timing
    if isinstance(a, unicode):
        if not isinstance(b, unicode):
            raise inputMismatchError
        isPy3Bytes = False
    elif isinstance(a, bytes):
        if not isinstance(b, bytes):
            raise inputMismatchError
        isPy3Bytes = sys.version_info >= (3, 0)
    else:
        raise inputMismatchError
    result = 0
    if isPy3Bytes:
        for x, y in zip(a, b):
            result |= x ^ y
    else:
        for x, y in zip(a, b):
            result |= ord(x) ^ ord(y)
    return result == 0
def genotp():
return "{} {}{}{} {}{}{} {}{}{} {}{}{} {}{}{}".format(*b32encode(sha1(str(randint(0,9999999999999999))).digest()[:10]).lower())
def otpchk(key, response):
tm = int(time() / 30)
for delta in (-1,0,1):
s = key.replace(" ","").rstrip().upper()
secretkey = b32decode(s)
# convert timestamp to raw bytes
b = pack(">q", tm+delta)
# generate HMAC-SHA1 from timestamp based on secret key
hm = HMAC(secretkey, b, sha1).digest()
# extract 4 bytes from digest based on LSB
offset = ord(hm[-1]) & 0x0F
truncatedHash = hm[offset:offset+4]
# get the code from it
code = unpack(">L", truncatedHash)[0]
code &= 0x7FFFFFFF;
code %= 1000000;
code = "0"*(6-len(str(code)))+str(code)
if constantTimeCompare(code,response):
return True
return False
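def demo_totp_code(key, t=None):
    # Sketch helper (not part of the original script; the name is made up):
    # compute the 6-digit time-based code for `key` at unix time `t`, using
    # the same HMAC-SHA1 arithmetic as otpchk() above. Handy for checking the
    # secret against another authenticator.
    if t is None:
        t = time()
    secretkey = b32decode(key.replace(" ", "").rstrip().upper())
    hm = HMAC(secretkey, pack(">q", int(t / 30)), sha1).digest()
    offset = ord(hm[-1]) & 0x0F
    code = unpack(">L", hm[offset:offset+4])[0] & 0x7FFFFFFF
    return "%06d" % (code % 1000000)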
try:
    shell = sys.argv[1]
except IndexError:
    pass
try:
    open(os.getenv("HOME")+"/.otpauth.conf")
except IOError:
    print "Can't read ~/.otpauth.conf file"
    exit(1)
pw = getpass("pass: ")
if otpchk(open(os.getenv("HOME")+"/.otpauth.conf").read(),pw.strip()):
print "Cool"
#os.execv(shell,sys.argv)
|
#!/usr/bin/env python3
import sys
morf_model = sys.argv[1]
align_lex = sys.argv[2]
#morph_lex = sys.argv[3]
#word_misalign = sys.argv[4]
#word_hardalign = sys.argv[5]
morph_map = {}
for line in open(morf_model):
if line.startswith("#"):
continue
# morphs = line.split()[1::2]
# morph_map[''.join(morphs)] = tuple(morphs)
parts = line.split()
morph_map[parts[0]] = tuple(p.strip('+') for p in parts[1:])
align_map = {}
for line in open(align_lex):
graphs,phones = line.split()
graphs = tuple(tuple(g.split(":")) for g in graphs.split('|') if len(g) != 0)
phones = tuple(tuple(p.split(":")) for p in phones.split('|') if len(p) != 0)
word = ''.join(sum((list(b) for b in graphs), []))
align_map[word] = (graphs, phones)
assert len(graphs) == len(phones)
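# Assumed line formats, inferred from the parsing above (hedged illustration,
# not taken from any real data file):
#   morf_model: first token is the word, remaining tokens are its morphs,
#               possibly marked with '+' on either side (stripped here).
#   align_lex:  two whitespace-separated fields; each is a '|'-separated list
#               of chunks, and each chunk is a ':'-separated group of symbols
#               (graphemes on the left, phonemes on the right).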
# words missing from align_map are counted once, in the main loop below
counter = 0
#morph_lex = open(morph_lex, 'w', encoding='utf-8')
# word_misalign = open(word_misalign, 'w', encoding='utf-8')
# word_hardalign = open(word_hardalign, 'w', encoding='utf-8')
not_clean_counter = 0
for word, morphs in morph_map.items():
if word not in align_map:
# print(word, file=word_misalign)
counter += 1
continue
grapheme_index = 0
morph_index = 0
inmorph_index = 0
cur_phones = []
for g, p in zip(*align_map[word]):
assert ''.join(g) == word[grapheme_index:grapheme_index+len(g)]
morph = morphs[morph_index]
morph_length = len(morph)
grapheme_index += len(g)
if len(g) > (morph_length - inmorph_index):
not_clean_counter += 1
# print(word, file=word_hardalign)
# print(word, file=sys.stderr)
# print(' '.join(morph_map[word]), file=sys.stderr)
# print(' '.join(''.join(a) for a in align_map[word][0]), file=sys.stderr)
# print(' '.join(''.join(a) for a in align_map[word][1]), file=sys.stderr)
# print(file=sys.stderr)
# print(file=sys.stderr)
# print(file=sys.stderr)
start = morph_index == 0
morph_index += 1
end = morph_index == len(morphs)
print("{}{}{}\t{}".format('' if start else '+', morph, '' if end else '+', ' '.join(cur_phones)))
cur_phones = []
cur_phones.extend(p)
inmorph_index = len(g) - (morph_length - inmorph_index)
elif len(g) == (morph_length - inmorph_index):
cur_phones.extend(p)
start = morph_index == 0
morph_index += 1
end = morph_index == len(morphs)
print("{}{}{}\t{}".format('' if start else '+', morph, '' if end else '+', ' '.join(cur_phones)))
inmorph_index = 0
cur_phones = []
else:
cur_phones.extend(p)
inmorph_index += len(g)
print("{} word were not in align_map".format(counter), file=sys.stderr)
print("{} alignments were difficult".format(not_clean_counter), file=sys.stderr)
|
__author__ = 'tan'
import tornado.web
import tornado.httputil
import json
import struct
from handler_base import JBoxHandler
from juliabox.api import APIContainer, APIConnector
class APIHandler(JBoxHandler):
def get(self):
self.log_debug("API server handler got GET request")
return self.post()
@tornado.web.asynchronous
def post(self):
self.log_debug("API server handler got POST request")
uri = self.request.uri
self.log_debug("called with uri: %s", uri)
comps = filter(bool, uri.split('/'))
if len(comps) < 2:
self.send_error(status_code=404)
return
api_name = comps[0]
cmd = comps[1]
args = comps[2:]
vargs = self.request.arguments
self.log_debug("calling service:%s. cmd:%s. nargs: %d. nvargs: %d", api_name, cmd, len(args), len(vargs))
APIContainer.ensure_container_available(api_name)
APIConnector.send_recv(api_name, cmd, args=args, vargs=vargs,
on_recv=self.on_recv,
on_timeout=self.on_timeout,
on_overload=self.on_overload)
@staticmethod
def pack_into_binary(data):
packed = str()
for d in data:
packed += struct.pack('B', d)
return packed
def on_recv(self, msg):
self.log_info("response received for %s", self.request.uri)
if 'nid' in msg:
APIContainer.record_ping(msg['nid'])
code = msg.get('code', 500)
if code == 200:
start_line = tornado.httputil.ResponseStartLine('', self._status_code, self._reason)
hdrs = tornado.httputil.HTTPHeaders(msg.get('hdrs', {}))
data = msg['data']
if type(data) == list:
hdrs.add("Content-Length", str(len(data)))
data = APIHandler.pack_into_binary(data)
elif type(data) == dict:
data = json.dumps(data)
else:
data = str(data)
self.request.connection.write_headers(start_line, hdrs, data)
self.request.connection.finish()
else:
self.send_error(status_code=code)
def on_overload(self):
self.log_error("server overloaded %s", self.request.uri)
self.send_error(status_code=503)
def on_timeout(self):
self.log_error("timed out serving %s", self.request.uri)
self.send_error(status_code=408)
#
# def is_valid_api(self, api_name):
# return api_name in self.config("api_names", [])
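#
# Request URI layout handled by post() above (sketch, inferred from the code):
#   /<api_name>/<cmd>[/<arg1>/<arg2>...]?k=v...
# path components beyond the command become positional args, and the query
# string arguments are forwarded as vargs to APIConnector.send_recv().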
|
#!/usr/bin/env python
import Queue
import threading
import urllib2
import time
from BeautifulSoup import BeautifulSoup
hosts = ["http://yahoo.com", "http://google.com", "http://amazon.com", "http://ibm.com", "http://apple.com"]
queue = Queue.Queue()
out_queue = Queue.Queue()
class ThreadUrl(threading.Thread):
"""
Threaded Url Grab
"""
def __init__(self, queue, out_queue):
threading.Thread.__init__(self)
self.queue = queue
self.out_queue = out_queue
def run(self):
while True:
# grabs host from queue
host = self.queue.get()
# grabs urls of hosts and then grabs chunk of webpage
url = urllib2.urlopen(host)
chunk = url.read()
# place chunk into out queue
self.out_queue.put(chunk)
# signals to queue job is done
self.queue.task_done()
class DatamineThread(threading.Thread):
"""
Thread Url Grab
"""
def __init__(self, out_queue):
threading.Thread.__init__(self)
self.out_queue = out_queue
def run(self):
while True:
# grabs hosts from queue
chunk = self.out_queue.get()
# parse the chunk
soup = BeautifulSoup(chunk)
print(soup.findAll(['title']))
# signals to queue job is done
self.out_queue.task_done()
def main():
# spawn a pool of threads, and pass them queue instance
for i in range(5):
t = ThreadUrl(queue, out_queue)
t.setDaemon(True)
t.start()
# populate queue with data
for host in hosts:
queue.put(host)
for i in range(5):
dt = DatamineThread(out_queue)
dt.setDaemon(True)
dt.start()
# wait on the queue until everything has been processed
queue.join()
out_queue.join()
if __name__ == "__main__":
start = time.time()
main()
print("Elapsed Time: %s" % (time.time() - start))
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from ranstring import randomByteString
from zope.interface import implementer
from twisted.internet import reactor, interfaces
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
# 2^63 - This is the maximum imposed by the WS protocol
FRAME_SIZE = 0x7FFFFFFFFFFFFFFF
@implementer(interfaces.IPushProducer)
class RandomByteStreamProducer:
"""
    A Twisted push producer that generates a stream of random octets and sends
    the data out inside a single WebSocket message frame.
"""
def __init__(self, proto):
self.proto = proto
self.started = False
self.paused = False
def pauseProducing(self):
self.paused = True
def resumeProducing(self):
self.paused = False
if not self.started:
self.proto.beginMessage(isBinary=True)
self.proto.beginMessageFrame(FRAME_SIZE)
self.started = True
while not self.paused:
data = randomByteString(1024)
if self.proto.sendMessageFrameData(data) <= 0:
self.proto.beginMessageFrame(FRAME_SIZE)
print("new frame started!")
def stopProducing(self):
pass
class StreamingProducerHashClientProtocol(WebSocketClientProtocol):
"""
    Streaming WebSocket client that generates a stream of random octets and
    sends it to a streaming WebSocket server, which computes a running SHA-256
    and sends the digest back to us every BATCH_SIZE octets. This example uses
    a Twisted producer to generate the byte stream as fast as the receiver can
    consume it, but no faster. Therefore, we don't need the application-level
    flow control used in the other examples.
"""
def onOpen(self):
self.count = 0
producer = RandomByteStreamProducer(self)
self.registerProducer(producer, True)
producer.resumeProducing()
def onMessage(self, payload, isBinary):
print("Digest for batch {} computed by server: {}".format(self.count, payload.decode('utf8')))
self.count += 1
if __name__ == '__main__':
factory = WebSocketClientFactory(u"ws://127.0.0.1:9000")
factory.protocol = StreamingProducerHashClientProtocol
connectWS(factory)
reactor.run()
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
operations = [
migrations.CreateModel(
name='Sign',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=1000)),
('display', models.CharField(unique=True, max_length=100)),
                ('status', models.CharField(default=b'auto', max_length=10,
                                            choices=[(b'auto', b'Auto'), (b'eighth', b'Eighth Period'),
                                                     (b'schedule', b'Bell Schedule'), (b'status', b'Schedule/Clock'),
                                                     (b'url', b'Custom URL')])),
('eighth_block_increment', models.IntegerField(default=0, null=True, blank=True)),
('url', models.CharField(max_length=2000, null=True, blank=True)),
],
),
]
|
"""
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
# pylint: disable-msg=E1101,W0613,W0603
from datetime import datetime, date
import time
import re
import copy
import itertools
import warnings
import os
from distutils.version import LooseVersion
import numpy as np
from pandas._libs import algos, lib, writers as libwriters
from pandas._libs.tslibs import timezones
from pandas.errors import PerformanceWarning
from pandas import compat
from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter
from pandas.core.dtypes.common import (
is_list_like,
is_categorical_dtype,
is_timedelta64_dtype,
is_datetime64tz_dtype,
is_datetime64_dtype,
ensure_object,
ensure_int64,
ensure_platform_int)
from pandas.core.dtypes.missing import array_equivalent
from pandas.core import config
from pandas.core.config import get_option
from pandas.core.sparse.array import BlockIndex, IntIndex
from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.core.algorithms import match, unique
from pandas.core.arrays.categorical import (Categorical,
_factorize_from_iterables)
from pandas.core.internals import (BlockManager, make_block,
_block2d_to_blocknd,
_factor_indexer, _block_shape)
from pandas.core.index import ensure_index
from pandas.core.computation.pytables import Expr, maybe_expression
from pandas.io.common import _stringify_path
from pandas.io.formats.printing import adjoin, pprint_thing
from pandas import (Series, DataFrame, Panel, Index,
MultiIndex, Int64Index, isna, concat, to_datetime,
SparseSeries, SparseDataFrame, PeriodIndex,
DatetimeIndex, TimedeltaIndex)
# versioning attribute
_version = '0.15.2'
# encoding
# PY3 encoding if we don't specify
_default_encoding = 'UTF-8'
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, np.bytes_):
s = s.decode('UTF-8')
return s
def _ensure_encoding(encoding):
# set the encoding if we need
if encoding is None:
if PY3:
encoding = _default_encoding
return encoding
def _ensure_str(name):
"""Ensure that an index / column name is a str (python 3) or
unicode (python 2); otherwise they may be np.string dtype.
Non-string dtypes are passed through unchanged.
https://github.com/pandas-dev/pandas/issues/13492
"""
if isinstance(name, compat.string_types):
name = compat.text_type(name)
return name
Term = Expr
def _ensure_term(where, scope_level):
"""
ensure that the where is a Term or a list of Term
this makes sure that we are capturing the scope of variables
that are passed
create the terms here with a frame_level=2 (we are 2 levels down)
"""
# only consider list/tuple here as an ndarray is automatically a coordinate
# list
level = scope_level + 1
if isinstance(where, (list, tuple)):
wlist = []
for w in filter(lambda x: x is not None, where):
if not maybe_expression(w):
wlist.append(w)
else:
wlist.append(Term(w, scope_level=level))
where = wlist
elif maybe_expression(where):
where = Term(where, scope_level=level)
return where
class PossibleDataLossError(Exception):
pass
class ClosedFileError(Exception):
pass
class IncompatibilityWarning(Warning):
pass
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""
class AttributeConflictWarning(Warning):
pass
attribute_conflict_doc = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""
class DuplicateWarning(Warning):
pass
duplicate_doc = """
duplicate entries in table, taking most recently appended
"""
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# formats
_FORMAT_MAP = {
u('f'): 'fixed',
u('fixed'): 'fixed',
u('t'): 'table',
u('table'): 'table',
}
format_deprecate_doc = """
the table keyword has been deprecated
use the format='fixed(f)|table(t)' keyword instead
fixed(f) : specifies the Fixed format
and is the default for put operations
table(t) : specifies the Table format
and is the default for append operations
"""
# map object types
_TYPE_MAP = {
Series: u('series'),
SparseSeries: u('sparse_series'),
DataFrame: u('frame'),
SparseDataFrame: u('sparse_frame'),
Panel: u('wide'),
}
# storer class map
_STORER_MAP = {
u('Series'): 'LegacySeriesFixed',
u('DataFrame'): 'LegacyFrameFixed',
u('DataMatrix'): 'LegacyFrameFixed',
u('series'): 'SeriesFixed',
u('sparse_series'): 'SparseSeriesFixed',
u('frame'): 'FrameFixed',
u('sparse_frame'): 'SparseFrameFixed',
u('wide'): 'PanelFixed',
}
# table class map
_TABLE_MAP = {
u('generic_table'): 'GenericTable',
u('appendable_series'): 'AppendableSeriesTable',
u('appendable_multiseries'): 'AppendableMultiSeriesTable',
u('appendable_frame'): 'AppendableFrameTable',
u('appendable_multiframe'): 'AppendableMultiFrameTable',
u('appendable_panel'): 'AppendablePanelTable',
u('worm'): 'WORMTable',
u('legacy_frame'): 'LegacyFrameTable',
u('legacy_panel'): 'LegacyPanelTable',
}
# axes map
_AXES_MAP = {
DataFrame: [0],
Panel: [1, 2]
}
# register our configuration options
dropna_doc = """
: boolean
drop ALL nan rows when appending to a table
"""
format_doc = """
: format
default format writing format, if None, then
put will default to 'fixed' and append will default to 'table'
"""
with config.config_prefix('io.hdf'):
config.register_option('dropna_table', False, dropna_doc,
validator=config.is_bool)
config.register_option(
'default_format', None, format_doc,
validator=config.is_one_of_factory(['fixed', 'table', None])
)
# oh the troubles to reduce import time
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
global _table_mod
global _table_file_open_policy_is_strict
if _table_mod is None:
import tables
_table_mod = tables
# version requirements
if LooseVersion(tables.__version__) < LooseVersion('3.0.0'):
raise ImportError("PyTables version >= 3.0.0 is required")
# set the file open policy
# return the file open policy; this changes as of pytables 3.1
# depending on the HDF5 version
try:
_table_file_open_policy_is_strict = (
tables.file._FILE_OPEN_POLICY == 'strict')
except:
pass
return _table_mod
# interface to/from ###
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
append=None, **kwargs):
""" store this object, close it if we opened it """
if append:
f = lambda store: store.append(key, value, **kwargs)
else:
f = lambda store: store.put(key, value, **kwargs)
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, string_types):
with HDFStore(path_or_buf, mode=mode, complevel=complevel,
complib=complib) as store:
f(store)
else:
f(path_or_buf)
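# Hedged usage sketch for to_hdf (the file name and key below are
# illustrative):
#   to_hdf('store.h5', 'df', df, mode='w', format='table')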
def read_hdf(path_or_buf, key=None, mode='r', **kwargs):
"""
Read from the store, close it if we opened it.
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
path_or_buf : string, buffer or path object
Path to the file to open, or an open :class:`pandas.HDFStore` object.
Supports any object implementing the ``__fspath__`` protocol.
This includes :class:`pathlib.Path` and py._path.local.LocalPath
objects.
.. versionadded:: 0.19.0 support for pathlib, py.path.
.. versionadded:: 0.21.0 support for __fspath__ protocol.
key : object, optional
The group identifier in the store. Can be omitted if the HDF file
contains a single pandas object.
mode : {'r', 'r+', 'a'}, optional
Mode to use when opening the file. Ignored if path_or_buf is a
:class:`pandas.HDFStore`. Default is 'r'.
where : list, optional
A list of Term (or convertible) objects.
start : int, optional
Row number to start selection.
stop : int, optional
Row number to stop selection.
columns : list, optional
A list of columns names to return.
iterator : bool, optional
Return an iterator object.
chunksize : int, optional
Number of rows to include in an iteration when using an iterator.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
**kwargs
Additional keyword arguments passed to HDFStore.
Returns
-------
item : object
The selected object. Return type depends on the object stored.
See Also
--------
pandas.DataFrame.to_hdf : write a HDF file from a DataFrame
pandas.HDFStore : low-level access to HDF files
Examples
--------
>>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z'])
>>> df.to_hdf('./store.h5', 'data')
>>> reread = pd.read_hdf('./store.h5')
"""
if mode not in ['r', 'r+', 'a']:
raise ValueError('mode {0} is not allowed while performing a read. '
'Allowed modes are r, r+ and a.'.format(mode))
# grab the scope
if 'where' in kwargs:
kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1)
if isinstance(path_or_buf, HDFStore):
if not path_or_buf.is_open:
raise IOError('The HDFStore must be open for reading.')
store = path_or_buf
auto_close = False
else:
path_or_buf = _stringify_path(path_or_buf)
if not isinstance(path_or_buf, string_types):
raise NotImplementedError('Support for generic buffers has not '
'been implemented.')
try:
exists = os.path.exists(path_or_buf)
# if filepath is too long
except (TypeError, ValueError):
exists = False
if not exists:
raise compat.FileNotFoundError(
'File %s does not exist' % path_or_buf)
store = HDFStore(path_or_buf, mode=mode, **kwargs)
# can't auto open/close if we are using an iterator
# so delegate to the iterator
auto_close = True
try:
if key is None:
groups = store.groups()
if len(groups) == 0:
raise ValueError('No dataset in HDF5 file.')
candidate_only_group = groups[0]
# For the HDF file to have only one dataset, all other groups
# should then be metadata groups for that candidate group. (This
# assumes that the groups() method enumerates parent groups
# before their children.)
for group_to_check in groups[1:]:
if not _is_metadata_of(group_to_check, candidate_only_group):
raise ValueError('key must be provided when HDF5 file '
'contains multiple datasets.')
key = candidate_only_group._v_pathname
return store.select(key, auto_close=auto_close, **kwargs)
except:
# if there is an error, close the store
try:
store.close()
except:
pass
raise
def _is_metadata_of(group, parent_group):
"""Check if a given group is a metadata group for a given parent_group."""
if group._v_depth <= parent_group._v_depth:
return False
current = group
while current._v_depth > 1:
parent = current._v_parent
if parent == parent_group and current._v_name == 'meta':
return True
current = current._v_parent
return False
class HDFStore(StringMixin):
"""
dict-like IO interface for storing pandas objects in PyTables
either Fixed or Table format.
Parameters
----------
path : string
File path to HDF5 file
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
Examples
--------
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
"""
def __init__(self, path, mode=None, complevel=None, complib=None,
fletcher32=False, **kwargs):
try:
import tables # noqa
except ImportError as ex: # pragma: no cover
raise ImportError('HDFStore requires PyTables, "{ex}" problem '
'importing'.format(ex=str(ex)))
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
"complib only supports {libs} compression.".format(
libs=tables.filters.all_complibs))
if complib is None and complevel is not None:
complib = tables.filters.default_complib
self._path = _stringify_path(path)
if mode is None:
mode = 'a'
self._mode = mode
self._handle = None
self._complevel = complevel if complevel else 0
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
def __fspath__(self):
return self._path
@property
def root(self):
""" return the root node """
self._check_if_open()
return self._handle.root
@property
def filename(self):
return self._path
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.put(key, value)
def __delitem__(self, key):
return self.remove(key)
def __getattr__(self, name):
""" allow attribute access to get stores """
try:
return self.get(name)
except:
pass
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def __contains__(self, key):
""" check for existence of this key
        can match the exact pathname or the pathname w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if name == key or name[1:] == key:
return True
return False
def __len__(self):
return len(self.groups())
def __unicode__(self):
return '%s\nFile path: %s\n' % (type(self), pprint_thing(self._path))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def keys(self):
"""
Return a (potentially unordered) list of the keys corresponding to the
objects stored in the HDFStore. These are ABSOLUTE path-names (e.g.
        have the leading '/')
"""
return [n._v_pathname for n in self.groups()]
def __iter__(self):
return iter(self.keys())
def items(self):
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
iteritems = items
def open(self, mode='a', **kwargs):
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ['a', 'w'] and mode in ['r', 'r+']:
pass
elif mode in ['w']:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
"Re-opening the file [{0}] with mode [{1}] "
"will delete the current file!"
.format(self._path, self._mode)
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complevel and self._complevel > 0:
self._filters = _tables().Filters(self._complevel, self._complib,
fletcher32=self._fletcher32)
try:
self._handle = tables.open_file(self._path, self._mode, **kwargs)
except (IOError) as e: # pragma: no cover
if 'can not be written' in str(e):
print('Opening %s in read-only mode' % self._path)
self._handle = tables.open_file(self._path, 'r', **kwargs)
else:
raise
except (ValueError) as e:
# trap PyTables >= 3.1 FILE_OPEN_POLICY exception
# to provide an updated message
if 'FILE_OPEN_POLICY' in str(e):
e = ValueError(
"PyTables [{version}] no longer supports opening multiple "
"files\n"
"even in read-only mode on this HDF5 version "
"[{hdf_version}]. You can accept this\n"
"and not open the same file multiple times at once,\n"
"upgrade the HDF5 version, or downgrade to PyTables 3.0.0 "
"which allows\n"
"files to be opened multiple times at once\n"
.format(version=tables.__version__,
hdf_version=tables.get_hdf5_version()))
raise e
except (Exception) as e:
# trying to read from a non-existent file causes an error which
# is not part of IOError, make it one
if self._mode == 'r' and 'Unable to open/create file' in str(e):
raise IOError(str(e))
raise
def close(self):
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self):
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync=False):
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
try:
os.fsync(self._handle.fileno())
except:
pass
def get(self, key):
"""
Retrieve pandas object stored in file
Parameters
----------
key : object
Returns
-------
obj : same type as object stored in file
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
return self._read_group(group)
def select(self, key, where=None, start=None, stop=None, columns=None,
iterator=False, chunksize=None, auto_close=False, **kwargs):
"""
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
key : object
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
columns : a list of columns that if not None, will limit the return
columns
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : boolean, should automatically close the store when
finished, default is False
Returns
-------
The selected object
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop,
where=_where,
columns=columns)
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=s.nrows,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, auto_close=auto_close)
return it.get_result()
def select_as_coordinates(
self, key, where=None, start=None, stop=None, **kwargs):
"""
return the selection as an Index
Parameters
----------
key : object
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
return self.get_storer(key).read_coordinates(where=where, start=start,
stop=stop, **kwargs)
def select_column(self, key, column, **kwargs):
"""
return a single column from the table. This is generally only useful to
select an indexable
Parameters
----------
key : object
column: the column of interest
Exceptions
----------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
return self.get_storer(key).read_column(column=column, **kwargs)
def select_as_multiple(self, keys, where=None, selector=None, columns=None,
start=None, stop=None, iterator=False,
chunksize=None, auto_close=False, **kwargs):
""" Retrieve pandas objects from multiple tables
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
Exceptions
----------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, string_types):
return self.select(key=keys, where=where, columns=columns,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, **kwargs)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
if t is None:
raise KeyError("Invalid table [%s]" % k)
if not t.is_table:
raise TypeError(
"object [%s] is not a table, and cannot be used in all "
"select as multiple" % t.pathname
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError(
"all tables must have exactly the same nrows!")
        # axis is the concatenation axis
axis = list({t.non_index_axes[0][0] for t in tbls})[0]
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of
# coordinates here
objs = [t.read(where=_where, columns=columns, start=_start,
stop=_stop, **kwargs) for t in tbls]
# concat and return
return concat(objs, axis=axis,
verify_integrity=False)._consolidate()
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=nrows,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, auto_close=auto_close)
return it.get_result(coordinates=True)
def put(self, key, value, format=None, append=False, **kwargs):
"""
Store object in HDFStore
Parameters
----------
key : object
value : {Series, DataFrame, Panel}
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default False
This will force Table format, append the input data to the
existing.
data_columns : list of columns to create as data columns, or True to
use all columns. See
`here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
the store settable by the option 'io.hdf.dropna_table'
"""
if format is None:
format = get_option("io.hdf.default_format") or 'fixed'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, **kwargs)
def remove(self, key, where=None, start=None, stop=None):
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Exceptions
----------
raises KeyError if key is not a valid store
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except KeyError:
# the key is not a valid store, re-raising KeyError
raise
except Exception:
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!")
# we are actually trying to remove a node (with children)
s = self.get_node(key)
if s is not None:
s._f_remove(recursive=True)
return None
# remove the node
if com._all_none(where, start, stop):
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
'can only remove with where on objects written as tables')
return s.delete(where=where, start=start, stop=stop)
def append(self, key, value, format=None, append=True, columns=None,
dropna=None, **kwargs):
"""
Append to Table in file. Node must already exist and be Table
format.
Parameters
----------
key : object
value : {Series, DataFrame, Panel}
format: 'table' is the default
table(t) : table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default True, append the input data to the
existing
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.
min_itemsize : dict of columns that specify minimum string sizes
        nan_rep : string to use as string nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
the store settable by the option 'io.hdf.dropna_table'
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
"""
if columns is not None:
raise TypeError("columns is not a supported keyword in append, "
"try data_columns")
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or 'table'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, dropna=dropna,
**kwargs)
def append_to_multiple(self, d, value, selector, data_columns=None,
axes=None, dropna=False, **kwargs):
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designed as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN. Default False.
Notes
-----
axes parameter is currently not accepted
"""
if axes is not None:
raise TypeError("axes is currently not accepted as a parameter to"
" append_to_multiple; you can create the "
"tables independently instead")
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
# figure out how to split the value
remain_key = None
remain_values = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that "
"is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how='all').index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex(v, axis=axis)
self.append(k, val, data_columns=dc, **kwargs)
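    # Hedged usage sketch for append_to_multiple / select_as_multiple (the
    # table names and columns below are illustrative, not from this file):
    #
    #   store.append_to_multiple(
    #       {'df1_table': ['A', 'B'], 'df2_table': None},
    #       df, selector='df1_table')
    #   result = store.select_as_multiple(
    #       ['df1_table', 'df2_table'], where=['A>0'], selector='df1_table')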
def create_table_index(self, key, **kwargs):
""" Create a pytables index on the table
Parameters
----------
key : object (the node to index)
Exceptions
----------
raises if the node is not a table
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not s.is_table:
raise TypeError(
"cannot create table index on a Fixed format store")
s.create_index(**kwargs)
def groups(self):
"""return a list of all the top-level nodes (that are not themselves a
pandas storage object)
"""
_tables()
self._check_if_open()
return [
g for g in self._handle.walk_groups()
if (not isinstance(g, _table_mod.link.Link) and
(getattr(g._v_attrs, 'pandas_type', None) or
getattr(g, 'table', None) or
(isinstance(g, _table_mod.table.Table) and
g._v_name != u('table'))))
]
def walk(self, where="/"):
""" Walk the pytables group hierarchy for pandas objects
This generator will yield the group path, subgroups and pandas object
names for each group.
Any non-pandas PyTables objects that are not a group will be ignored.
The `where` group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure.
.. versionadded:: 0.24.0
Parameters
----------
where : str, optional
Group where to start walking.
If not supplied, the root group is used.
Yields
------
path : str
Full path to a group (without trailing '/')
groups : list of str
names of the groups contained in `path`
leaves : list of str
names of the pandas objects contained in `path`
"""
_tables()
self._check_if_open()
for g in self._handle.walk_groups(where):
if getattr(g._v_attrs, 'pandas_type', None) is not None:
continue
groups = []
leaves = []
for child in g._v_children.values():
pandas_type = getattr(child._v_attrs, 'pandas_type', None)
if pandas_type is None:
if isinstance(child, _table_mod.group.Group):
groups.append(child._v_name)
else:
leaves.append(child._v_name)
yield (g._v_pathname.rstrip('/'), groups, leaves)
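    # Hedged usage sketch for walk() (names are illustrative):
    #
    #   for path, groups, leaves in store.walk():
    #       for leaf in leaves:
    #           obj = store.get(path + '/' + leaf)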
def get_node(self, key):
""" return the node with the key or None if it does not exist """
self._check_if_open()
try:
if not key.startswith('/'):
key = '/' + key
return self._handle.get_node(self.root, key)
except:
return None
def get_storer(self, key):
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
raise KeyError('No object named {} in the file'.format(key))
s = self._create_storer(group)
s.infer_axes()
return s
def copy(self, file, mode='w', propindexes=True, keys=None, complib=None,
complevel=None, fletcher32=False, overwrite=True):
""" copy the existing store to a new file, upgrading in place
Parameters
----------
propindexes: restore indexes in copied file (defaults to True)
keys : list of keys to include in the copy (defaults to all)
overwrite : overwrite (remove and replace) existing nodes in the
new store (default is True)
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
"""
new_store = HDFStore(
file,
mode=mode,
complib=complib,
complevel=complevel,
fletcher32=fletcher32)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if s.is_table:
index = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k, data, index=index,
data_columns=getattr(s, 'data_columns', None),
encoding=s.encoding
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
def info(self):
"""
print detailed information on the store
.. versionadded:: 0.21.0
"""
output = '%s\nFile path: %s\n' % (type(self), pprint_thing(self._path))
if self.is_open:
lkeys = sorted(list(self.keys()))
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(
pprint_thing(s or 'invalid_HDFStore node'))
except Exception as detail:
keys.append(k)
values.append("[invalid_HDFStore node: %s]"
% pprint_thing(detail))
output += adjoin(12, keys, values)
else:
output += 'Empty'
else:
output += "File is CLOSED"
return output
# private methods ######
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError("{0} file is not open!".format(self._path))
def _validate_format(self, format, kwargs):
""" validate / deprecate formats; return the new kwargs """
kwargs = kwargs.copy()
# validate
try:
kwargs['format'] = _FORMAT_MAP[format.lower()]
except:
raise TypeError("invalid HDFStore format specified [{0}]"
.format(format))
return kwargs
def _create_storer(self, group, format=None, value=None, append=False,
**kwargs):
""" return a suitable class to operate """
def error(t):
raise TypeError(
"cannot properly create the storer for: [%s] [group->%s,"
"value->%s,format->%s,append->%s,kwargs->%s]"
% (t, group, type(value), format, append, kwargs)
)
pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None))
tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None))
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
if (getattr(group, 'table', None) or
isinstance(group, _table_mod.table.Table)):
pt = u('frame_table')
tt = u('generic_table')
else:
raise TypeError(
"cannot create a storer if the object is not existing "
"nor a value are passed")
else:
try:
pt = _TYPE_MAP[type(value)]
except:
error('_TYPE_MAP')
# we are actually a table
if format == 'table':
pt += u('_table')
# a storer node
if u('table') not in pt:
try:
return globals()[_STORER_MAP[pt]](self, group, **kwargs)
except:
error('_STORER_MAP')
# existing node (and must be a table)
if tt is None:
# if we are a writer, determine the tt
if value is not None:
if pt == u('series_table'):
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_series')
elif index.nlevels > 1:
tt = u('appendable_multiseries')
elif pt == u('frame_table'):
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_frame')
elif index.nlevels > 1:
tt = u('appendable_multiframe')
elif pt == u('wide_table'):
tt = u('appendable_panel')
elif pt == u('ndim_table'):
tt = u('appendable_ndim')
else:
                # distinguish between a frame/table
tt = u('legacy_panel')
try:
fields = group.table._v_attrs.fields
if len(fields) == 1 and fields[0] == u('value'):
tt = u('legacy_frame')
except:
pass
try:
return globals()[_TABLE_MAP[tt]](self, group, **kwargs)
except:
error('_TABLE_MAP')
def _write_to_group(self, key, value, format, index=True, append=False,
complib=None, encoding=None, **kwargs):
group = self.get_node(key)
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
# we don't want to store a table node at all if are object is 0-len
# as there are not dtypes
if getattr(value, 'empty', None) and (format == 'table' or append):
return
if group is None:
paths = key.split('/')
# recursively create the groups
path = '/'
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith('/'):
new_path += '/'
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
s = self._create_storer(group, format, value, append=append,
encoding=encoding, **kwargs)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if (not s.is_table or
(s.is_table and format == 'fixed' and s.is_exists)):
raise ValueError('Can only append to Tables')
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError(
'Compression not supported on Fixed format stores'
)
# write the object
s.write(obj=value, append=append, complib=complib, **kwargs)
if s.is_table and index:
s.create_index(columns=index)
def _read_group(self, group, **kwargs):
s = self._create_storer(group)
s.infer_axes()
return s.read(**kwargs)
def get_store(path, **kwargs):
""" Backwards compatible alias for ``HDFStore``
"""
warnings.warn(
"get_store is deprecated and be "
"removed in a future version\n"
"HDFStore(path, **kwargs) is the replacement",
FutureWarning,
stacklevel=6)
return HDFStore(path, **kwargs)
class TableIterator(object):
""" define the iteration interface on a table
Parameters
----------
store : the reference store
s : the referred storer
func : the function to execute the query
where : the where of the query
nrows : the rows to iterate on
start : the passed start value (default is None)
stop : the passed stop value (default is None)
iterator : boolean, whether to use the default iterator
chunksize : the passed chunking value (default is 50000)
auto_close : boolean, automatically close the store at the end of
iteration, default is False
kwargs : the passed kwargs
"""
def __init__(self, store, s, func, where, nrows, start=None, stop=None,
iterator=False, chunksize=None, auto_close=False):
self.store = store
self.s = s
self.func = func
self.where = where
# set start/stop if they are not set if we are a table
if self.s.is_table:
if nrows is None:
nrows = 0
if start is None:
start = 0
if stop is None:
stop = nrows
stop = min(nrows, stop)
self.nrows = nrows
self.start = start
self.stop = stop
self.coordinates = None
if iterator or chunksize is not None:
if chunksize is None:
chunksize = 100000
self.chunksize = int(chunksize)
else:
self.chunksize = None
self.auto_close = auto_close
def __iter__(self):
# iterate
current = self.start
while current < self.stop:
stop = min(current + self.chunksize, self.stop)
value = self.func(None, None, self.coordinates[current:stop])
current = stop
if value is None or not len(value):
continue
yield value
self.close()
def close(self):
if self.auto_close:
self.store.close()
def get_result(self, coordinates=False):
# return the actual iterator
if self.chunksize is not None:
if not self.s.is_table:
raise TypeError(
"can only use an iterator or chunksize on a table")
self.coordinates = self.s.read_coordinates(where=self.where)
return self
        # if specified, read via coordinates (necessary for multiple
        # selections)
if coordinates:
where = self.s.read_coordinates(where=self.where, start=self.start,
stop=self.stop)
else:
where = self.where
# directly return the result
results = self.func(self.start, self.stop, where)
self.close()
return results
class IndexCol(StringMixin):
""" an index column description class
Parameters
----------
axis : axis which I reference
values : the ndarray like converted values
kind : a string description of this type
typ : the pytables type
pos : the position in the pytables
"""
is_an_indexable = True
is_data_indexable = True
_info_fields = ['freq', 'tz', 'index_name']
def __init__(self, values=None, kind=None, typ=None, cname=None,
itemsize=None, name=None, axis=None, kind_attr=None,
pos=None, freq=None, tz=None, index_name=None, **kwargs):
self.values = values
self.kind = kind
self.typ = typ
self.itemsize = itemsize
self.name = name
self.cname = cname
self.kind_attr = kind_attr
self.axis = axis
self.pos = pos
self.freq = freq
self.tz = tz
self.index_name = index_name
self.table = None
self.meta = None
self.metadata = None
if name is not None:
self.set_name(name, kind_attr)
if pos is not None:
self.set_pos(pos)
def set_name(self, name, kind_attr=None):
""" set the name of this indexer """
self.name = name
self.kind_attr = kind_attr or "%s_kind" % name
if self.cname is None:
self.cname = name
return self
def set_axis(self, axis):
""" set the axis over which I index """
self.axis = axis
return self
def set_pos(self, pos):
""" set the position of this column in the Table """
self.pos = pos
if pos is not None and self.typ is not None:
self.typ._v_pos = pos
return self
def set_table(self, table):
self.table = table
return self
def __unicode__(self):
temp = tuple(
map(pprint_thing,
(self.name,
self.cname,
self.axis,
self.pos,
self.kind)))
return "name->%s,cname->%s,axis->%s,pos->%s,kind->%s" % temp
def __eq__(self, other):
""" compare 2 col items """
return all(getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'axis', 'pos'])
def __ne__(self, other):
return not self.__eq__(other)
@property
def is_indexed(self):
""" return whether I am an indexed column """
try:
return getattr(self.table.cols, self.cname).is_indexed
except:
            return False
def copy(self):
new_self = copy.copy(self)
return new_self
def infer(self, handler):
"""infer this column from the table: create and return a new object"""
table = handler.table
new_self = self.copy()
new_self.set_table(table)
new_self.get_attr()
new_self.read_metadata(handler)
return new_self
def convert(self, values, nan_rep, encoding, errors):
""" set the values from this selection: take = take ownership """
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
values = _maybe_convert(values, self.kind, encoding, errors)
kwargs = dict()
if self.freq is not None:
kwargs['freq'] = _ensure_decoded(self.freq)
if self.index_name is not None:
kwargs['name'] = _ensure_decoded(self.index_name)
try:
self.values = Index(values, **kwargs)
except:
            # if the output freq is different from what we recorded,
# it should be None (see also 'doc example part 2')
if 'freq' in kwargs:
kwargs['freq'] = None
self.values = Index(values, **kwargs)
self.values = _set_tz(self.values, self.tz)
return self
def take_data(self):
""" return the values & release the memory """
self.values, values = None, self.values
return values
@property
def attrs(self):
return self.table._v_attrs
@property
def description(self):
return self.table.description
@property
def col(self):
""" return my current col description """
return getattr(self.description, self.cname, None)
@property
def cvalues(self):
""" return my cython values """
return self.values
def __iter__(self):
return iter(self.values)
def maybe_set_size(self, min_itemsize=None, **kwargs):
""" maybe set a string col itemsize:
min_itemsize can be an integer or a dict with this columns name
with an integer size """
if _ensure_decoded(self.kind) == u('string'):
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
if min_itemsize is not None and self.typ.itemsize < min_itemsize:
self.typ = _tables(
).StringCol(itemsize=min_itemsize, pos=self.pos)
def validate(self, handler, append, **kwargs):
self.validate_names()
def validate_names(self):
pass
def validate_and_set(self, handler, append, **kwargs):
self.set_table(handler.table)
self.validate_col()
self.validate_attr(append)
self.validate_metadata(handler)
self.write_metadata(handler)
self.set_attr()
def validate_col(self, itemsize=None):
""" validate this column: return the compared against itemsize """
# validate this column for string truncation (or reset to the max size)
if _ensure_decoded(self.kind) == u('string'):
c = self.col
if c is not None:
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
raise ValueError(
"Trying to store a string with len [%s] in [%s] "
"column but\nthis column has a limit of [%s]!\n"
"Consider using min_itemsize to preset the sizes on "
"these columns" % (itemsize, self.cname, c.itemsize))
return c.itemsize
return None
def validate_attr(self, append):
# check for backwards incompatibility
if append:
existing_kind = getattr(self.attrs, self.kind_attr, None)
if existing_kind is not None and existing_kind != self.kind:
raise TypeError("incompatible kind in col [%s - %s]" %
(existing_kind, self.kind))
def update_info(self, info):
""" set/update the info for this indexable with the key/value
if there is a conflict raise/warn as needed """
for key in self._info_fields:
value = getattr(self, key, None)
idx = _get_info(info, self.name)
existing_value = idx.get(key)
if key in idx and value is not None and existing_value != value:
# frequency/name just warn
if key in ['freq', 'index_name']:
ws = attribute_conflict_doc % (key, existing_value, value)
warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
# reset
idx[key] = None
setattr(self, key, None)
else:
raise ValueError(
"invalid info for [%s] for [%s], existing_value [%s] "
"conflicts with new value [%s]"
% (self.name, key, existing_value, value))
else:
if value is not None or existing_value is not None:
idx[key] = value
return self
def set_info(self, info):
""" set my state from the passed info """
idx = info.get(self.name)
if idx is not None:
self.__dict__.update(idx)
def get_attr(self):
""" set the kind for this column """
self.kind = getattr(self.attrs, self.kind_attr, None)
def set_attr(self):
""" set the kind for this column """
setattr(self.attrs, self.kind_attr, self.kind)
def read_metadata(self, handler):
""" retrieve the metadata for this columns """
self.metadata = handler.read_metadata(self.cname)
def validate_metadata(self, handler):
""" validate that kind=category does not change the categories """
if self.meta == 'category':
new_metadata = self.metadata
cur_metadata = handler.read_metadata(self.cname)
if new_metadata is not None and cur_metadata is not None \
and not array_equivalent(new_metadata, cur_metadata):
raise ValueError("cannot append a categorical with "
"different categories to the existing")
def write_metadata(self, handler):
""" set the meta data """
if self.metadata is not None:
handler.write_metadata(self.cname, self.metadata)
class GenericIndexCol(IndexCol):
""" an index which is not represented in the data of the table """
@property
def is_indexed(self):
return False
def convert(self, values, nan_rep, encoding, errors):
""" set the values from this selection: take = take ownership """
self.values = Int64Index(np.arange(self.table.nrows))
return self
def get_attr(self):
pass
def set_attr(self):
pass
class DataCol(IndexCol):
""" a data holding column, by definition this is not indexable
Parameters
----------
data : the actual data
cname : the column name in the table to hold the data (typically
values)
meta : a string description of the metadata
metadata : the actual metadata
"""
is_an_indexable = False
is_data_indexable = False
_info_fields = ['tz', 'ordered']
@classmethod
def create_for_block(
cls, i=None, name=None, cname=None, version=None, **kwargs):
""" return a new datacol with the block i """
if cname is None:
cname = name or 'values_block_%d' % i
if name is None:
name = cname
# prior to 0.10.1, we named values blocks like: values_block_0 and the
# name values_0
try:
if version[0] == 0 and version[1] <= 10 and version[2] == 0:
m = re.search(r"values_block_(\d+)", name)
if m:
name = "values_%s" % m.groups()[0]
except Exception:
pass
return cls(name=name, cname=cname, **kwargs)
def __init__(self, values=None, kind=None, typ=None,
cname=None, data=None, meta=None, metadata=None,
block=None, **kwargs):
super(DataCol, self).__init__(values=values, kind=kind, typ=typ,
cname=cname, **kwargs)
self.dtype = None
self.dtype_attr = u("%s_dtype" % self.name)
self.meta = meta
self.meta_attr = u("%s_meta" % self.name)
self.set_data(data)
self.set_metadata(metadata)
def __unicode__(self):
temp = tuple(
map(pprint_thing,
(self.name,
self.cname,
self.dtype,
self.kind,
self.shape)))
return "name->%s,cname->%s,dtype->%s,kind->%s,shape->%s" % temp
def __eq__(self, other):
""" compare 2 col items """
return all(getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'dtype', 'pos'])
def set_data(self, data, dtype=None):
self.data = data
if data is not None:
if dtype is not None:
self.dtype = dtype
self.set_kind()
elif self.dtype is None:
self.dtype = data.dtype.name
self.set_kind()
def take_data(self):
""" return the data & release the memory """
self.data, data = None, self.data
return data
def set_metadata(self, metadata):
""" record the metadata """
if metadata is not None:
metadata = np.array(metadata, copy=False).ravel()
self.metadata = metadata
def set_kind(self):
# set my kind if we can
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
if dtype.startswith(u('string')) or dtype.startswith(u('bytes')):
self.kind = 'string'
elif dtype.startswith(u('float')):
self.kind = 'float'
elif dtype.startswith(u('complex')):
self.kind = 'complex'
elif dtype.startswith(u('int')) or dtype.startswith(u('uint')):
self.kind = 'integer'
elif dtype.startswith(u('date')):
self.kind = 'datetime'
elif dtype.startswith(u('timedelta')):
self.kind = 'timedelta'
elif dtype.startswith(u('bool')):
self.kind = 'bool'
else:
raise AssertionError(
"cannot interpret dtype of [%s] in [%s]" % (dtype, self))
# set my typ if we need
if self.typ is None:
self.typ = getattr(self.description, self.cname, None)
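# Mapping sketch (derived from the branches in set_kind above; dtype names on
# the left, resulting kind on the right):
#   'string' / 'bytes'      -> 'string'
#   'float32' / 'float64'   -> 'float'
#   'complex128'            -> 'complex'
#   'int64' / 'uint8'       -> 'integer'
#   'datetime64[ns]'        -> 'datetime'
#   'timedelta64[ns]'       -> 'timedelta'
#   'bool'                  -> 'bool'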
def set_atom(self, block, block_items, existing_col, min_itemsize,
nan_rep, info, encoding=None, errors='strict'):
""" create and setup my atom from the block b """
self.values = list(block_items)
# short-cut certain block types
if block.is_categorical:
return self.set_atom_categorical(block, items=block_items,
info=info)
elif block.is_datetimetz:
return self.set_atom_datetime64tz(block, info=info)
elif block.is_datetime:
return self.set_atom_datetime64(block)
elif block.is_timedelta:
return self.set_atom_timedelta64(block)
elif block.is_complex:
return self.set_atom_complex(block)
dtype = block.dtype.name
inferred_type = lib.infer_dtype(block.values)
if inferred_type == 'date':
raise TypeError(
"[date] is not implemented as a table column")
elif inferred_type == 'datetime':
# after GH 8260
# this would only be hit for a multi-timezone dtype,
# which is an error
raise TypeError(
"too many timezones in this block, create separate "
"data columns"
)
elif inferred_type == 'unicode':
raise TypeError(
"[unicode] is not implemented as a table column")
# this is basically a catchall; if, say, a datetime64 has nans then it will
# end up here
elif inferred_type == 'string' or dtype == 'object':
self.set_atom_string(
block, block_items,
existing_col,
min_itemsize,
nan_rep,
encoding,
errors)
# set as a data block
else:
self.set_atom_data(block)
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])
def set_atom_string(self, block, block_items, existing_col, min_itemsize,
nan_rep, encoding, errors):
# fill nan items with myself, don't disturb the blocks by
# trying to downcast
block = block.fillna(nan_rep, downcast=False)
if isinstance(block, list):
block = block[0]
data = block.values
# see if we have a valid string type
inferred_type = lib.infer_dtype(data.ravel())
if inferred_type != 'string':
# we cannot serialize this data, so report an exception on a column
# by column basis
for i, item in enumerate(block_items):
col = block.iget(i)
inferred_type = lib.infer_dtype(col.ravel())
if inferred_type != 'string':
raise TypeError(
"Cannot serialize the column [%s] because\n"
"its data contents are [%s] object dtype"
% (item, inferred_type)
)
# itemsize is the maximum length of a string (along any dimension)
data_converted = _convert_string_array(data, encoding, errors)
itemsize = data_converted.itemsize
# specified min_itemsize?
if isinstance(min_itemsize, dict):
min_itemsize = int(min_itemsize.get(
self.name) or min_itemsize.get('values') or 0)
itemsize = max(min_itemsize or 0, itemsize)
# check for column in the values conflicts
if existing_col is not None:
eci = existing_col.validate_col(itemsize)
if eci > itemsize:
itemsize = eci
self.itemsize = itemsize
self.kind = 'string'
self.typ = self.get_atom_string(block, itemsize)
self.set_data(data_converted.astype('|S%d' % itemsize, copy=False))
def get_atom_coltype(self, kind=None):
""" return the PyTables column class for this column """
if kind is None:
kind = self.kind
if self.kind.startswith('uint'):
col_name = "UInt%sCol" % kind[4:]
else:
col_name = "%sCol" % kind.capitalize()
return getattr(_tables(), col_name)
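# For example (following the naming rule above): a kind of 'float64' resolves
# to tables.Float64Col, 'int32' to tables.Int32Col, and 'uint64' to
# tables.UInt64Col via the dedicated 'UInt%sCol' branch.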
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)(shape=block.shape[0])
def set_atom_complex(self, block):
self.kind = block.dtype.name
itemsize = int(self.kind.split('complex')[-1]) // 8
self.typ = _tables().ComplexCol(
itemsize=itemsize, shape=block.shape[0])
self.set_data(block.values.astype(self.typ.type, copy=False))
def set_atom_data(self, block):
self.kind = block.dtype.name
self.typ = self.get_atom_data(block)
self.set_data(block.values.astype(self.typ.type, copy=False))
def set_atom_categorical(self, block, items, info=None, values=None):
# currently only supports a 1-D categorical
# in a 1-D block
values = block.values
codes = values.codes
self.kind = 'integer'
self.dtype = codes.dtype.name
if values.ndim > 1:
raise NotImplementedError("only support 1-d categoricals")
if len(items) > 1:
raise NotImplementedError("only support single block categoricals")
# write the codes; must be in a block shape
self.ordered = values.ordered
self.typ = self.get_atom_data(block, kind=codes.dtype.name)
self.set_data(_block_shape(codes))
# write the categories
self.meta = 'category'
self.set_metadata(block.values.categories)
# update the info
self.update_info(info)
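# Round-trip sketch of the categorical layout used above (illustrative; the
# Series below is an assumption for the example): the integer codes become
# the stored values and the categories are written as column metadata.
#   >>> import pandas as pd
#   >>> s = pd.Series(pd.Categorical(['a', 'b', 'a']))
#   >>> s.cat.codes.tolist()
#   [0, 1, 0]
#   >>> list(s.cat.categories)
#   ['a', 'b']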
def get_atom_datetime64(self, block):
return _tables().Int64Col(shape=block.shape[0])
def set_atom_datetime64(self, block, values=None):
self.kind = 'datetime64'
self.typ = self.get_atom_datetime64(block)
if values is None:
values = block.values.view('i8')
self.set_data(values, 'datetime64')
def set_atom_datetime64tz(self, block, info, values=None):
if values is None:
values = block.values
# convert this column to i8 in UTC, and save the tz
values = values.asi8.reshape(block.shape)
# store a converted timezone
self.tz = _get_tz(block.values.tz)
self.update_info(info)
self.kind = 'datetime64'
self.typ = self.get_atom_datetime64(block)
self.set_data(values, 'datetime64')
def get_atom_timedelta64(self, block):
return _tables().Int64Col(shape=block.shape[0])
def set_atom_timedelta64(self, block, values=None):
self.kind = 'timedelta64'
self.typ = self.get_atom_timedelta64(block)
if values is None:
values = block.values.view('i8')
self.set_data(values, 'timedelta64')
@property
def shape(self):
return getattr(self.data, 'shape', None)
@property
def cvalues(self):
""" return my cython values """
return self.data
def validate_attr(self, append):
"""validate that we have the same order as the existing & same dtype"""
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if (existing_fields is not None and
existing_fields != list(self.values)):
raise ValueError("appended items do not match existing items"
" in table!")
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if (existing_dtype is not None and
existing_dtype != self.dtype):
raise ValueError("appended items dtype do not match existing "
"items dtype in table!")
def convert(self, values, nan_rep, encoding, errors):
"""set the data from this selection (and convert to the correct dtype
if we can)
"""
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
self.set_data(values)
# use the meta if needed
meta = _ensure_decoded(self.meta)
# convert to the correct dtype
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
# reverse converts
if dtype == u('datetime64'):
# recreate with tz if indicated
self.data = _set_tz(self.data, self.tz, coerce=True)
elif dtype == u('timedelta64'):
self.data = np.asarray(self.data, dtype='m8[ns]')
elif dtype == u('date'):
try:
self.data = np.asarray(
[date.fromordinal(v) for v in self.data], dtype=object)
except ValueError:
self.data = np.asarray(
[date.fromtimestamp(v) for v in self.data],
dtype=object)
elif dtype == u('datetime'):
self.data = np.asarray(
[datetime.fromtimestamp(v) for v in self.data],
dtype=object)
elif meta == u('category'):
# we have a categorical
categories = self.metadata
codes = self.data.ravel()
# if we have stored a NaN in the categories
# then strip it; in theory we could have BOTH
# -1s in the codes and nulls :<
if categories is None:
# Handle case of NaN-only categorical columns in which case
# the categories are an empty array; when this is stored,
# pytables cannot write a zero-len array, so on readback
# the categories would be None and `read_hdf()` would fail.
categories = Index([], dtype=np.float64)
else:
mask = isna(categories)
if mask.any():
categories = categories[~mask]
codes[codes != -1] -= mask.astype(int).cumsum().values
self.data = Categorical.from_codes(codes,
categories=categories,
ordered=self.ordered)
else:
try:
self.data = self.data.astype(dtype, copy=False)
except Exception:
self.data = self.data.astype('O', copy=False)
# convert nans / decode
if _ensure_decoded(self.kind) == u('string'):
self.data = _unconvert_string_array(
self.data, nan_rep=nan_rep, encoding=encoding, errors=errors)
return self
def get_attr(self):
""" get the data for this column """
self.values = getattr(self.attrs, self.kind_attr, None)
self.dtype = getattr(self.attrs, self.dtype_attr, None)
self.meta = getattr(self.attrs, self.meta_attr, None)
self.set_kind()
def set_attr(self):
""" set the data for this column """
setattr(self.attrs, self.kind_attr, self.values)
setattr(self.attrs, self.meta_attr, self.meta)
if self.dtype is not None:
setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
""" represent a data column that can be indexed """
is_data_indexable = True
def validate_names(self):
if not Index(self.values).is_object():
raise ValueError("cannot have non-object label DataIndexableCol")
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize)
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)()
def get_atom_datetime64(self, block):
return _tables().Int64Col()
def get_atom_timedelta64(self, block):
return _tables().Int64Col()
class GenericDataIndexableCol(DataIndexableCol):
""" represent a generic pytables data column """
def get_attr(self):
pass
class Fixed(StringMixin):
""" represent an object in my store
facilitate read/write of various types of objects
this is an abstract base class
Parameters
----------
parent : my parent HDFStore
group : the group node where the table resides
"""
pandas_kind = None
obj_type = None
ndim = None
is_table = False
def __init__(self, parent, group, encoding=None, errors='strict',
**kwargs):
self.parent = parent
self.group = group
self.encoding = _ensure_encoding(encoding)
self.errors = errors
self.set_version()
@property
def is_old_version(self):
return (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1)
def set_version(self):
""" compute and set our version """
version = _ensure_decoded(
getattr(self.group._v_attrs, 'pandas_version', None))
try:
self.version = tuple(int(x) for x in version.split('.'))
if len(self.version) == 2:
self.version = self.version + (0,)
except Exception:
self.version = (0, 0, 0)
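# Examples of the parsing above: a 'pandas_version' attribute of '0.15.2'
# yields (0, 15, 2), a two-part value like '0.10' is padded to (0, 10, 0),
# and a missing or malformed attribute falls back to (0, 0, 0).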
@property
def pandas_type(self):
return _ensure_decoded(getattr(self.group._v_attrs,
'pandas_type', None))
@property
def format_type(self):
return 'fixed'
def __unicode__(self):
""" return a pretty representation of myself """
self.infer_axes()
s = self.shape
if s is not None:
if isinstance(s, (list, tuple)):
s = "[%s]" % ','.join(pprint_thing(x) for x in s)
return "%-12.12s (shape->%s)" % (self.pandas_type, s)
return self.pandas_type
def set_object_info(self):
""" set my pandas type & version """
self.attrs.pandas_type = str(self.pandas_kind)
self.attrs.pandas_version = str(_version)
self.set_version()
def copy(self):
new_self = copy.copy(self)
return new_self
@property
def storage_obj_type(self):
return self.obj_type
@property
def shape(self):
return self.nrows
@property
def pathname(self):
return self.group._v_pathname
@property
def _handle(self):
return self.parent._handle
@property
def _filters(self):
return self.parent._filters
@property
def _complevel(self):
return self.parent._complevel
@property
def _fletcher32(self):
return self.parent._fletcher32
@property
def _complib(self):
return self.parent._complib
@property
def attrs(self):
return self.group._v_attrs
def set_attrs(self):
""" set our object attributes """
pass
def get_attrs(self):
""" get our object attributes """
pass
@property
def storable(self):
""" return my storable """
return self.group
@property
def is_exists(self):
return False
@property
def nrows(self):
return getattr(self.storable, 'nrows', None)
def validate(self, other):
""" validate against an existing storable """
if other is None:
return
return True
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
return True
def infer_axes(self):
""" infer the axes of my storer
return a boolean indicating if we have a valid storer or not """
s = self.storable
if s is None:
return False
self.get_attrs()
return True
def read(self, **kwargs):
raise NotImplementedError(
"cannot read on an abstract storer: subclasses should implement")
def write(self, **kwargs):
raise NotImplementedError(
"cannot write on an abstract storer: sublcasses should implement")
def delete(self, where=None, start=None, stop=None, **kwargs):
"""
support fully deleting the node in its entirety (only) - where
specification must be None
"""
if com._all_none(where, start, stop):
self._handle.remove_node(self.group, recursive=True)
return None
raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
""" a generified fixed version """
_index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'}
_reverse_index_map = {v: k for k, v in compat.iteritems(_index_type_map)}
attributes = []
# indexer helpers
def _class_to_alias(self, cls):
return self._index_type_map.get(cls, '')
def _alias_to_class(self, alias):
if isinstance(alias, type): # pragma: no cover
# compat: for a short period of time master stored types
return alias
return self._reverse_index_map.get(alias, Index)
def _get_index_factory(self, klass):
if klass == DatetimeIndex:
def f(values, freq=None, tz=None):
# data are already in UTC, localize and convert if tz present
result = DatetimeIndex._simple_new(values.values, None,
freq=freq)
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
return f
elif klass == PeriodIndex:
def f(values, freq=None, tz=None):
return PeriodIndex._simple_new(values, None, freq=freq)
return f
return klass
def validate_read(self, kwargs):
"""
remove table keywords from kwargs and return
raise if any keywords are passed which are not-None
"""
kwargs = copy.copy(kwargs)
columns = kwargs.pop('columns', None)
if columns is not None:
raise TypeError("cannot pass a column specification when reading "
"a Fixed format store. this store must be "
"selected in its entirety")
where = kwargs.pop('where', None)
if where is not None:
raise TypeError("cannot pass a where specification when reading "
"from a Fixed format store. this store must be "
"selected in its entirety")
return kwargs
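# Consequence for callers (illustrative sketch; the store path and key are
# assumptions): a fixed-format store can only be read in its entirety, so
# passing columns or where raises TypeError.
#   >>> import pandas as pd
#   >>> with pd.HDFStore('example.h5') as store:
#   ...     store.put('df_fixed', pd.DataFrame({'A': [1, 2]}))  # fixed format
#   ...     store.select('df_fixed', columns=['A'])  # raises TypeError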
@property
def is_exists(self):
return True
def set_attrs(self):
""" set our object attributes """
self.attrs.encoding = self.encoding
self.attrs.errors = self.errors
def get_attrs(self):
""" retrieve our attributes """
self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None))
self.errors = getattr(self.attrs, 'errors', 'strict')
for n in self.attributes:
setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))
def write(self, obj, **kwargs):
self.set_attrs()
def read_array(self, key, start=None, stop=None):
""" read an array for the specified node (off of group """
import tables
node = getattr(self.group, key)
attrs = node._v_attrs
transposed = getattr(attrs, 'transposed', False)
if isinstance(node, tables.VLArray):
ret = node[0][start:stop]
else:
dtype = getattr(attrs, 'value_type', None)
shape = getattr(attrs, 'shape', None)
if shape is not None:
# length 0 axis
ret = np.empty(shape, dtype=dtype)
else:
ret = node[start:stop]
if dtype == u('datetime64'):
# reconstruct a timezone if indicated
ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)
elif dtype == u('timedelta64'):
ret = np.asarray(ret, dtype='m8[ns]')
if transposed:
return ret.T
else:
return ret
def read_index(self, key, **kwargs):
variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key))
if variety == u('multi'):
return self.read_multi_index(key, **kwargs)
elif variety == u('block'):
return self.read_block_index(key, **kwargs)
elif variety == u('sparseint'):
return self.read_sparse_intindex(key, **kwargs)
elif variety == u('regular'):
_, index = self.read_index_node(getattr(self.group, key), **kwargs)
return index
else: # pragma: no cover
raise TypeError('unrecognized index variety: %s' % variety)
def write_index(self, key, index):
if isinstance(index, MultiIndex):
setattr(self.attrs, '%s_variety' % key, 'multi')
self.write_multi_index(key, index)
elif isinstance(index, BlockIndex):
setattr(self.attrs, '%s_variety' % key, 'block')
self.write_block_index(key, index)
elif isinstance(index, IntIndex):
setattr(self.attrs, '%s_variety' % key, 'sparseint')
self.write_sparse_intindex(key, index)
else:
setattr(self.attrs, '%s_variety' % key, 'regular')
converted = _convert_index(index, self.encoding, self.errors,
self.format_type).set_name('index')
self.write_array(key, converted.values)
node = getattr(self.group, key)
node._v_attrs.kind = converted.kind
node._v_attrs.name = index.name
if isinstance(index, (DatetimeIndex, PeriodIndex)):
node._v_attrs.index_class = self._class_to_alias(type(index))
if hasattr(index, 'freq'):
node._v_attrs.freq = index.freq
if hasattr(index, 'tz') and index.tz is not None:
node._v_attrs.tz = _get_tz(index.tz)
def write_block_index(self, key, index):
self.write_array('%s_blocs' % key, index.blocs)
self.write_array('%s_blengths' % key, index.blengths)
setattr(self.attrs, '%s_length' % key, index.length)
def read_block_index(self, key, **kwargs):
length = getattr(self.attrs, '%s_length' % key)
blocs = self.read_array('%s_blocs' % key, **kwargs)
blengths = self.read_array('%s_blengths' % key, **kwargs)
return BlockIndex(length, blocs, blengths)
def write_sparse_intindex(self, key, index):
self.write_array('%s_indices' % key, index.indices)
setattr(self.attrs, '%s_length' % key, index.length)
def read_sparse_intindex(self, key, **kwargs):
length = getattr(self.attrs, '%s_length' % key)
indices = self.read_array('%s_indices' % key, **kwargs)
return IntIndex(length, indices)
def write_multi_index(self, key, index):
setattr(self.attrs, '%s_nlevels' % key, index.nlevels)
for i, (lev, lab, name) in enumerate(zip(index.levels,
index.labels,
index.names)):
# write the level
level_key = '%s_level%d' % (key, i)
conv_level = _convert_index(lev, self.encoding, self.errors,
self.format_type).set_name(level_key)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
node._v_attrs.name = name
# write the name
setattr(node._v_attrs, '%s_name%d' % (key, i), name)
# write the labels
label_key = '%s_label%d' % (key, i)
self.write_array(label_key, lab)
def read_multi_index(self, key, **kwargs):
nlevels = getattr(self.attrs, '%s_nlevels' % key)
levels = []
labels = []
names = []
for i in range(nlevels):
level_key = '%s_level%d' % (key, i)
name, lev = self.read_index_node(getattr(self.group, level_key),
**kwargs)
levels.append(lev)
names.append(name)
label_key = '%s_label%d' % (key, i)
lab = self.read_array(label_key, **kwargs)
labels.append(lab)
return MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=True)
def read_index_node(self, node, start=None, stop=None):
data = node[start:stop]
# If the index was an empty array write_array_empty() will
# have written a sentinel. Here we replace it with the original.
if ('shape' in node._v_attrs and
self._is_empty_array(getattr(node._v_attrs, 'shape'))):
data = np.empty(getattr(node._v_attrs, 'shape'),
dtype=getattr(node._v_attrs, 'value_type'))
kind = _ensure_decoded(node._v_attrs.kind)
name = None
if 'name' in node._v_attrs:
name = _ensure_str(node._v_attrs.name)
index_class = self._alias_to_class(_ensure_decoded(
getattr(node._v_attrs, 'index_class', '')))
factory = self._get_index_factory(index_class)
kwargs = {}
if u('freq') in node._v_attrs:
kwargs['freq'] = node._v_attrs['freq']
if u('tz') in node._v_attrs:
kwargs['tz'] = node._v_attrs['tz']
if kind in (u('date'), u('datetime')):
index = factory(_unconvert_index(data, kind,
encoding=self.encoding,
errors=self.errors),
dtype=object, **kwargs)
else:
index = factory(_unconvert_index(data, kind,
encoding=self.encoding,
errors=self.errors), **kwargs)
index.name = name
return name, index
def write_array_empty(self, key, value):
""" write a 0-len array """
# ugly hack for length 0 axes
arr = np.empty((1,) * value.ndim)
self._handle.create_array(self.group, key, arr)
getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
getattr(self.group, key)._v_attrs.shape = value.shape
def _is_empty_array(self, shape):
"""Returns true if any axis is zero length."""
return any(x == 0 for x in shape)
def write_array(self, key, value, items=None):
if key in self.group:
self._handle.remove_node(self.group, key)
# Transform needed to interface with pytables row/col notation
empty_array = self._is_empty_array(value.shape)
transposed = False
if is_categorical_dtype(value):
raise NotImplementedError('Cannot store a category dtype in '
'a HDF5 dataset that uses format='
'"fixed". Use format="table".')
if not empty_array:
value = value.T
transposed = True
if self._filters is not None:
atom = None
try:
# get the atom for this datatype
atom = _tables().Atom.from_dtype(value.dtype)
except ValueError:
pass
if atom is not None:
# create an empty chunked array and fill it from value
if not empty_array:
ca = self._handle.create_carray(self.group, key, atom,
value.shape,
filters=self._filters)
ca[:] = value
getattr(self.group, key)._v_attrs.transposed = transposed
else:
self.write_array_empty(key, value)
return
if value.dtype.type == np.object_:
# infer the type, warn if we have a non-string type here (for
# performance)
inferred_type = lib.infer_dtype(value.ravel())
if empty_array:
pass
elif inferred_type == 'string':
pass
else:
try:
items = list(items)
except:
pass
ws = performance_doc % (inferred_type, key, items)
warnings.warn(ws, PerformanceWarning, stacklevel=7)
vlarr = self._handle.create_vlarray(self.group, key,
_tables().ObjectAtom())
vlarr.append(value)
else:
if empty_array:
self.write_array_empty(key, value)
else:
if is_datetime64_dtype(value.dtype):
self._handle.create_array(
self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'datetime64'
elif is_datetime64tz_dtype(value.dtype):
# store as UTC
# with a zone
self._handle.create_array(self.group, key,
value.asi8)
node = getattr(self.group, key)
node._v_attrs.tz = _get_tz(value.tz)
node._v_attrs.value_type = 'datetime64'
elif is_timedelta64_dtype(value.dtype):
self._handle.create_array(
self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'timedelta64'
else:
self._handle.create_array(self.group, key, value)
getattr(self.group, key)._v_attrs.transposed = transposed
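# Summary of the strategy above (descriptive comment, not an API): when
# compression filters are active and PyTables can build an Atom for the dtype,
# values go into a chunked CArray; object dtype falls back to a VLArray of
# ObjectAtom (warning on non-string objects); datetime64/timedelta64 data are
# written as int64 with a 'value_type' attribute so read_array can restore the
# dtype; empty arrays are handled by write_array_empty with a shape sentinel.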
class LegacyFixed(GenericFixed):
def read_index_legacy(self, key, start=None, stop=None):
node = getattr(self.group, key)
data = node[start:stop]
kind = node._v_attrs.kind
return _unconvert_index_legacy(data, kind, encoding=self.encoding,
errors=self.errors)
class LegacySeriesFixed(LegacyFixed):
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index_legacy('index')
values = self.read_array('values')
return Series(values, index=index)
class LegacyFrameFixed(LegacyFixed):
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index_legacy('index')
columns = self.read_index_legacy('columns')
values = self.read_array('values')
return DataFrame(values, index=index, columns=columns)
class SeriesFixed(GenericFixed):
pandas_kind = u('series')
attributes = ['name']
@property
def shape(self):
try:
return len(getattr(self.group, 'values')),
except Exception:
return None
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index('index', **kwargs)
values = self.read_array('values', **kwargs)
return Series(values, index=index, name=self.name)
def write(self, obj, **kwargs):
super(SeriesFixed, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_array('values', obj.values)
self.attrs.name = obj.name
class SparseFixed(GenericFixed):
def validate_read(self, kwargs):
"""
we don't support start, stop kwds in Sparse
"""
kwargs = super(SparseFixed, self).validate_read(kwargs)
if 'start' in kwargs or 'stop' in kwargs:
raise NotImplementedError("start and/or stop are not supported "
"in fixed Sparse reading")
return kwargs
class SparseSeriesFixed(SparseFixed):
pandas_kind = u('sparse_series')
attributes = ['name', 'fill_value', 'kind']
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index('index')
sp_values = self.read_array('sp_values')
sp_index = self.read_index('sp_index')
return SparseSeries(sp_values, index=index, sparse_index=sp_index,
kind=self.kind or u('block'),
fill_value=self.fill_value,
name=self.name)
def write(self, obj, **kwargs):
super(SparseSeriesFixed, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_index('sp_index', obj.sp_index)
self.write_array('sp_values', obj.sp_values)
self.attrs.name = obj.name
self.attrs.fill_value = obj.fill_value
self.attrs.kind = obj.kind
class SparseFrameFixed(SparseFixed):
pandas_kind = u('sparse_frame')
attributes = ['default_kind', 'default_fill_value']
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
columns = self.read_index('columns')
sdict = {}
for c in columns:
key = 'sparse_series_%s' % c
s = SparseSeriesFixed(self.parent, getattr(self.group, key))
s.infer_axes()
sdict[c] = s.read()
return SparseDataFrame(sdict, columns=columns,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
def write(self, obj, **kwargs):
""" write it as a collection of individual sparse series """
super(SparseFrameFixed, self).write(obj, **kwargs)
for name, ss in compat.iteritems(obj):
key = 'sparse_series_%s' % name
if key not in self.group._v_children:
node = self._handle.create_group(self.group, key)
else:
node = getattr(self.group, key)
s = SparseSeriesFixed(self.parent, node)
s.write(ss)
self.attrs.default_fill_value = obj.default_fill_value
self.attrs.default_kind = obj.default_kind
self.write_index('columns', obj.columns)
class BlockManagerFixed(GenericFixed):
attributes = ['ndim', 'nblocks']
is_shape_reversed = False
@property
def shape(self):
try:
ndim = self.ndim
# items
items = 0
for i in range(self.nblocks):
node = getattr(self.group, 'block%d_items' % i)
shape = getattr(node, 'shape', None)
if shape is not None:
items += shape[0]
# data shape
node = getattr(self.group, 'block0_values')
shape = getattr(node, 'shape', None)
if shape is not None:
shape = list(shape[0:(ndim - 1)])
else:
shape = []
shape.append(items)
# hacky - this works for frames, but is reversed for panels
if self.is_shape_reversed:
shape = shape[::-1]
return shape
except Exception:
return None
def read(self, start=None, stop=None, **kwargs):
# start, stop applied to rows, so 0th axis only
kwargs = self.validate_read(kwargs)
select_axis = self.obj_type()._get_block_manager_axis(0)
axes = []
for i in range(self.ndim):
_start, _stop = (start, stop) if i == select_axis else (None, None)
ax = self.read_index('axis%d' % i, start=_start, stop=_stop)
axes.append(ax)
items = axes[0]
blocks = []
for i in range(self.nblocks):
blk_items = self.read_index('block%d_items' % i)
values = self.read_array('block%d_values' % i,
start=_start, stop=_stop)
blk = make_block(values,
placement=items.get_indexer(blk_items))
blocks.append(blk)
return self.obj_type(BlockManager(blocks, axes))
def write(self, obj, **kwargs):
super(BlockManagerFixed, self).write(obj, **kwargs)
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
self.attrs.ndim = data.ndim
for i, ax in enumerate(data.axes):
if i == 0:
if not ax.is_unique:
raise ValueError(
"Columns index has to be unique for fixed format")
self.write_index('axis%d' % i, ax)
# Supporting mixed-type DataFrame objects...nontrivial
self.attrs.nblocks = len(data.blocks)
for i, blk in enumerate(data.blocks):
# I have no idea why, but writing values before items fixed #2299
blk_items = data.items.take(blk.mgr_locs)
self.write_array('block%d_values' % i, blk.values, items=blk_items)
self.write_index('block%d_items' % i, blk_items)
class FrameFixed(BlockManagerFixed):
pandas_kind = u('frame')
obj_type = DataFrame
class PanelFixed(BlockManagerFixed):
pandas_kind = u('wide')
obj_type = Panel
is_shape_reversed = True
def write(self, obj, **kwargs):
obj._consolidate_inplace()
return super(PanelFixed, self).write(obj, **kwargs)
class Table(Fixed):
""" represent a table:
facilitate read/write of various types of tables
Attrs in Table Node
-------------------
These are attributes that are store in the main table node, they are
necessary to recreate these tables when read back in.
index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""
pandas_kind = u('wide_table')
table_type = None
levels = 1
is_table = True
is_shape_reversed = False
def __init__(self, *args, **kwargs):
super(Table, self).__init__(*args, **kwargs)
self.index_axes = []
self.non_index_axes = []
self.values_axes = []
self.data_columns = []
self.metadata = []
self.info = dict()
self.nan_rep = None
self.selection = None
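# Usage sketch of the table format this class backs (illustrative; file, key
# and column names are assumptions): tables support querying and appending,
# unlike fixed-format storers.
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': list('xyz')})
#   >>> with pd.HDFStore('example.h5') as store:
#   ...     store.put('df', df, format='table', data_columns=['A'])
#   ...     store.select('df', where='A > 1')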
@property
def table_type_short(self):
return self.table_type.split('_')[0]
@property
def format_type(self):
return 'table'
def __unicode__(self):
""" return a pretty representatgion of myself """
self.infer_axes()
dc = ",dc->[%s]" % ','.join(
self.data_columns) if len(self.data_columns) else ''
ver = ''
if self.is_old_version:
ver = "[%s]" % '.'.join(str(x) for x in self.version)
return "%-12.12s%s (typ->%s,nrows->%s,ncols->%s,indexers->[%s]%s)" % (
self.pandas_type, ver, self.table_type_short, self.nrows,
self.ncols, ','.join(a.name for a in self.index_axes), dc
)
def __getitem__(self, c):
""" return the axis for c """
for a in self.axes:
if c == a.name:
return a
return None
def validate(self, other):
""" validate against an existing table """
if other is None:
return
if other.table_type != self.table_type:
raise TypeError("incompatible table_type with existing [%s - %s]" %
(other.table_type, self.table_type))
for c in ['index_axes', 'non_index_axes', 'values_axes']:
sv = getattr(self, c, None)
ov = getattr(other, c, None)
if sv != ov:
# show the error for the specific axes
for i, sax in enumerate(sv):
oax = ov[i]
if sax != oax:
raise ValueError(
"invalid combinate of [%s] on appending data [%s] "
"vs current table [%s]" % (c, sax, oax))
# should never get here
raise Exception(
"invalid combinate of [%s] on appending data [%s] vs "
"current table [%s]" % (c, sv, ov))
@property
def is_multi_index(self):
"""the levels attribute is 1 or a list in the case of a multi-index"""
return isinstance(self.levels, list)
def validate_metadata(self, existing):
""" create / validate metadata """
self.metadata = [
c.name for c in self.values_axes if c.metadata is not None]
def validate_multiindex(self, obj):
"""validate that we can store the multi-index; reset and return the
new object
"""
levels = [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(obj.index.names)]
try:
return obj.reset_index(), levels
except ValueError:
raise ValueError("duplicate names/columns in the multi-index when "
"storing as a table")
@property
def nrows_expected(self):
""" based on our axes, compute the expected nrows """
return np.prod([i.cvalues.shape[0] for i in self.index_axes])
@property
def is_exists(self):
""" has this table been created """
return u('table') in self.group
@property
def storable(self):
return getattr(self.group, 'table', None)
@property
def table(self):
""" return the table group (this is my storable) """
return self.storable
@property
def dtype(self):
return self.table.dtype
@property
def description(self):
return self.table.description
@property
def axes(self):
return itertools.chain(self.index_axes, self.values_axes)
@property
def ncols(self):
""" the number of total columns in the values axes """
return sum(len(a.values) for a in self.values_axes)
@property
def is_transposed(self):
return False
@property
def data_orientation(self):
"""return a tuple of my permutated axes, non_indexable at the front"""
return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],
[int(a.axis) for a in self.index_axes]))
def queryables(self):
""" return a dict of the kinds allowable columns for this object """
# compute the values_axes queryables
return dict(
[(a.cname, a) for a in self.index_axes] +
[(self.storage_obj_type._AXIS_NAMES[axis], None)
for axis, values in self.non_index_axes] +
[(v.cname, v) for v in self.values_axes
if v.name in set(self.data_columns)]
)
def index_cols(self):
""" return a list of my index cols """
return [(i.axis, i.cname) for i in self.index_axes]
def values_cols(self):
""" return a list of my values cols """
return [i.cname for i in self.values_axes]
def _get_metadata_path(self, key):
""" return the metadata pathname for this key """
return "{group}/meta/{key}/meta".format(group=self.group._v_pathname,
key=key)
def write_metadata(self, key, values):
"""
write out a metadata array to the key as a table-format Series
Parameters
----------
key : string
values : ndarray
"""
values = Series(values)
self.parent.put(self._get_metadata_path(key), values, format='table',
encoding=self.encoding, errors=self.errors,
nan_rep=self.nan_rep)
def read_metadata(self, key):
""" return the meta data array for this key """
if getattr(getattr(self.group, 'meta', None), key, None) is not None:
return self.parent.select(self._get_metadata_path(key))
return None
def set_info(self):
""" update our table index info """
self.attrs.info = self.info
def set_attrs(self):
""" set our table type & indexables """
self.attrs.table_type = str(self.table_type)
self.attrs.index_cols = self.index_cols()
self.attrs.values_cols = self.values_cols()
self.attrs.non_index_axes = self.non_index_axes
self.attrs.data_columns = self.data_columns
self.attrs.nan_rep = self.nan_rep
self.attrs.encoding = self.encoding
self.attrs.errors = self.errors
self.attrs.levels = self.levels
self.attrs.metadata = self.metadata
self.set_info()
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = getattr(
self.attrs, 'non_index_axes', None) or []
self.data_columns = getattr(
self.attrs, 'data_columns', None) or []
self.info = getattr(
self.attrs, 'info', None) or dict()
self.nan_rep = getattr(self.attrs, 'nan_rep', None)
self.encoding = _ensure_encoding(
getattr(self.attrs, 'encoding', None))
self.errors = getattr(self.attrs, 'errors', 'strict')
self.levels = getattr(
self.attrs, 'levels', None) or []
self.index_axes = [
a.infer(self) for a in self.indexables if a.is_an_indexable
]
self.values_axes = [
a.infer(self) for a in self.indexables if not a.is_an_indexable
]
self.metadata = getattr(
self.attrs, 'metadata', None) or []
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
if where is not None:
if (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1):
ws = incompatibility_doc % '.'.join(
[str(x) for x in self.version])
warnings.warn(ws, IncompatibilityWarning)
def validate_min_itemsize(self, min_itemsize):
"""validate the min_itemisze doesn't contain items that are not in the
axes this needs data_columns to be defined
"""
if min_itemsize is None:
return
if not isinstance(min_itemsize, dict):
return
q = self.queryables()
for k, v in min_itemsize.items():
# ok, apply generally
if k == 'values':
continue
if k not in q:
raise ValueError(
"min_itemsize has the key [%s] which is not an axis or "
"data_column" % k)
@property
def indexables(self):
""" create/cache the indexables if they don't exist """
if self._indexables is None:
self._indexables = []
# index columns
self._indexables.extend([
IndexCol(name=name, axis=axis, pos=i)
for i, (axis, name) in enumerate(self.attrs.index_cols)
])
# values columns
dc = set(self.data_columns)
base_pos = len(self._indexables)
def f(i, c):
klass = DataCol
if c in dc:
klass = DataIndexableCol
return klass.create_for_block(i=i, name=c, pos=base_pos + i,
version=self.version)
self._indexables.extend(
[f(i, c) for i, c in enumerate(self.attrs.values_cols)])
return self._indexables
def create_index(self, columns=None, optlevel=None, kind=None):
"""
Create a pytables index on the specified columns
note: cannot index Time64Col() or ComplexCol currently;
PyTables must be >= 3.0
Parameters
----------
columns : False (don't create an index), True (create all columns
index), None or list_like (the indexers to index)
optlevel: optimization level (defaults to 6)
kind : kind of index (defaults to 'medium')
Exceptions
----------
raises if the node is not a table
"""
if not self.infer_axes():
return
if columns is False:
return
# index all indexables and data_columns
if columns is None or columns is True:
columns = [a.cname for a in self.axes if a.is_data_indexable]
if not isinstance(columns, (tuple, list)):
columns = [columns]
kw = dict()
if optlevel is not None:
kw['optlevel'] = optlevel
if kind is not None:
kw['kind'] = kind
table = self.table
for c in columns:
v = getattr(table.cols, c, None)
if v is not None:
# remove the index if the kind/optlevel have changed
if v.is_indexed:
index = v.index
cur_optlevel = index.optlevel
cur_kind = index.kind
if kind is not None and cur_kind != kind:
v.remove_index()
else:
kw['kind'] = cur_kind
if optlevel is not None and cur_optlevel != optlevel:
v.remove_index()
else:
kw['optlevel'] = cur_optlevel
# create the index
if not v.is_indexed:
if v.type.startswith('complex'):
raise TypeError(
'Columns containing complex values can be stored '
'but cannot'
' be indexed when using table format. Either use '
'fixed format, set index=False, or do not include '
'the columns containing complex values to '
'data_columns when initializing the table.')
v.create_index(**kw)
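# Typical call path (sketch; key/column names are assumptions): users reach
# this via HDFStore.create_table_index, assuming pandas is imported as pd and
# 'df' was written with format='table'.
#   >>> with pd.HDFStore('example.h5') as store:
#   ...     store.create_table_index('df', columns=['A'], optlevel=9,
#   ...                               kind='full')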
def read_axes(self, where, **kwargs):
"""create and return the axes sniffed from the table: return boolean
for success
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(self, where=where, **kwargs)
values = self.selection.select()
# convert the data
for a in self.axes:
a.set_info(self.info)
a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding,
errors=self.errors)
return True
def get_object(self, obj):
""" return the data for this obj """
return obj
def validate_data_columns(self, data_columns, min_itemsize):
"""take the input data_columns and min_itemize and create a data
columns spec
"""
if not len(self.non_index_axes):
return []
axis, axis_labels = self.non_index_axes[0]
info = self.info.get(axis, dict())
if info.get('type') == 'MultiIndex' and data_columns:
raise ValueError("cannot use a multi-index on axis [{0}] with "
"data_columns {1}".format(axis, data_columns))
# evaluate the passed data_columns, True == use all columns
# take only valid axis labels
if data_columns is True:
data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns.extend([
k for k in min_itemsize.keys()
if k != 'values' and k not in existing_data_columns
])
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels]
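# Examples of the resolution above (derived from the code): data_columns=True
# expands to every label on the non-index axis; data_columns=None yields no
# separate data columns; keys of a min_itemsize dict (other than 'values')
# are appended so their itemsize can be enforced per column; and anything not
# present in the axis labels is dropped.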
def create_axes(self, axes, obj, validate=True, nan_rep=None,
data_columns=None, min_itemsize=None, **kwargs):
""" create and return the axes
legacy tables create an indexable column, indexable index,
non-indexable fields
Parameters
----------
axes: a list of the axes in order to create (names or numbers of
the axes)
obj : the object to create axes on
validate: validate the obj against an existing object already
written
min_itemsize: a dict of the min size for a column in bytes
nan_rep : a value to use for string column nan representation
encoding : the encoding for string values
data_columns : a list of columns that we want to create separate to
allow indexing (or True will force all columns)
"""
# set the default axes if needed
if axes is None:
try:
axes = _AXES_MAP[type(obj)]
except KeyError:
raise TypeError("cannot properly create the storer for: "
"[group->%s,value->%s]"
% (self.group._v_name, type(obj)))
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
existing_table = self.copy()
existing_table.infer_axes()
axes = [a.axis for a in existing_table.index_axes]
data_columns = existing_table.data_columns
nan_rep = existing_table.nan_rep
self.encoding = existing_table.encoding
self.errors = existing_table.errors
self.info = copy.copy(existing_table.info)
else:
existing_table = None
# currently only support ndim-1 axes
if len(axes) != self.ndim - 1:
raise ValueError(
"currently only support ndim-1 indexers in an AppendableTable")
# create according to the new data
self.non_index_axes = []
self.data_columns = []
# nan_representation
if nan_rep is None:
nan_rep = 'nan'
self.nan_rep = nan_rep
# create axes to index and non_index
index_axes_map = dict()
for i, a in enumerate(obj.axes):
if i in axes:
name = obj._AXIS_NAMES[i]
index_axes_map[i] = _convert_index(
a, self.encoding, self.errors, self.format_type
).set_name(name).set_axis(i)
else:
# we might be able to change the axes on the appending data if
# necessary
append_axis = list(a)
if existing_table is not None:
indexer = len(self.non_index_axes)
exist_axis = existing_table.non_index_axes[indexer][1]
if not array_equivalent(np.array(append_axis),
np.array(exist_axis)):
# ahah! -> reindex
if array_equivalent(np.array(sorted(append_axis)),
np.array(sorted(exist_axis))):
append_axis = exist_axis
# the non_index_axes info
info = _get_info(self.info, i)
info['names'] = list(a.names)
info['type'] = a.__class__.__name__
self.non_index_axes.append((i, append_axis))
# set axis positions (based on the axes)
self.index_axes = [
index_axes_map[a].set_pos(j).update_info(self.info)
for j, a in enumerate(axes)
]
j = len(self.index_axes)
# check for column conflicts
for a in self.axes:
a.maybe_set_size(min_itemsize=min_itemsize)
# reindex by our non_index_axes & compute data_columns
for a in self.non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
def get_blk_items(mgr, blocks):
return [mgr.items.take(blk.mgr_locs) for blk in blocks]
# figure out data_columns and get out blocks
block_obj = self.get_object(obj)._consolidate()
blocks = block_obj._data.blocks
blk_items = get_blk_items(block_obj._data, blocks)
if len(self.non_index_axes):
axis, axis_labels = self.non_index_axes[0]
data_columns = self.validate_data_columns(
data_columns, min_itemsize)
if len(data_columns):
mgr = block_obj.reindex(
Index(axis_labels).difference(Index(data_columns)),
axis=axis
)._data
blocks = list(mgr.blocks)
blk_items = get_blk_items(mgr, blocks)
for c in data_columns:
mgr = block_obj.reindex([c], axis=axis)._data
blocks.extend(mgr.blocks)
blk_items.extend(get_blk_items(mgr, mgr.blocks))
# reorder the blocks in the same order as the existing_table if we can
if existing_table is not None:
by_items = {tuple(b_items.tolist()): (b, b_items)
for b, b_items in zip(blocks, blk_items)}
new_blocks = []
new_blk_items = []
for ea in existing_table.values_axes:
items = tuple(ea.values)
try:
b, b_items = by_items.pop(items)
new_blocks.append(b)
new_blk_items.append(b_items)
except KeyError:
raise ValueError(
"cannot match existing table structure for [%s] on "
"appending data" % ','.join(pprint_thing(item) for
item in items))
blocks = new_blocks
blk_items = new_blk_items
# add my values
self.values_axes = []
for i, (b, b_items) in enumerate(zip(blocks, blk_items)):
# shape of the data column are the indexable axes
klass = DataCol
name = None
# we have a data_column
if (data_columns and len(b_items) == 1 and
b_items[0] in data_columns):
klass = DataIndexableCol
name = b_items[0]
self.data_columns.append(name)
# make sure that we match up the existing columns
# if we have an existing table
if existing_table is not None and validate:
try:
existing_col = existing_table.values_axes[i]
except IndexError:
raise ValueError("Incompatible appended table [%s] with "
"existing table [%s]"
% (blocks, existing_table.values_axes))
else:
existing_col = None
try:
col = klass.create_for_block(
i=i, name=name, version=self.version)
col.set_atom(block=b, block_items=b_items,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
encoding=self.encoding,
errors=self.errors,
info=self.info)
col.set_pos(j)
self.values_axes.append(col)
except (NotImplementedError, ValueError, TypeError) as e:
raise e
except Exception as detail:
raise Exception(
"cannot find the correct atom type -> "
"[dtype->%s,items->%s] %s"
% (b.dtype.name, b_items, str(detail))
)
j += 1
# validate our min_itemsize
self.validate_min_itemsize(min_itemsize)
# validate our metadata
self.validate_metadata(existing_table)
# validate the axes if we have an existing table
if validate:
self.validate(existing_table)
def process_axes(self, obj, columns=None):
""" process axes filters """
# make a copy to avoid side effects
if columns is not None:
columns = list(columns)
# make sure to include levels if we have them
if columns is not None and self.is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
# reorder by any non_index_axes & limit to the select columns
for axis, labels in self.non_index_axes:
obj = _reindex_axis(obj, axis, labels, columns)
# apply the selection filters (but keep in the same order)
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
def process_filter(field, filt):
for axis_name in obj._AXIS_NAMES.values():
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
# see if the field is the name of an axis
if field == axis_name:
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
# this might be the name of a field in an axis
elif field in axis_values:
# we need to filter on this dimension
values = ensure_index(getattr(obj, field).values)
filt = ensure_index(filt)
# hack until we support reversed dim flags
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
raise ValueError(
"cannot find the field [%s] for filtering!" % field)
obj = process_filter(field, filt)
return obj
def create_description(self, complib=None, complevel=None,
fletcher32=False, expectedrows=None):
""" create the description of the table from the axes & values """
# provide expectedrows if it was passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = dict(name='table', expectedrows=expectedrows)
# description from the axes & values
d['description'] = {a.cname: a.typ for a in self.axes}
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel, complib=complib,
fletcher32=fletcher32 or self._fletcher32)
d['filters'] = filters
elif self._filters is not None:
d['filters'] = self._filters
return d
def read_coordinates(self, where=None, start=None, stop=None, **kwargs):
"""select coordinates (row numbers) from a table; return the
coordinates object
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(
self, where=where, start=start, stop=stop, **kwargs)
coords = self.selection.select_coords()
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
data = self.read_column(
field, start=coords.min(), stop=coords.max() + 1)
coords = coords[
op(data.iloc[coords - coords.min()], filt).values]
return Index(coords)
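# Usage sketch (illustrative; key and condition are assumptions): the public
# wrapper is HDFStore.select_as_coordinates, whose Index of row numbers can be
# fed back into a later select.
#   >>> with pd.HDFStore('example.h5') as store:
#   ...     coords = store.select_as_coordinates('df', 'A > 1')
#   ...     subset = store.select('df', where=coords)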
def read_column(self, column, where=None, start=None, stop=None, **kwargs):
"""return a single column from the table, generally only indexables
are interesting
"""
# validate the version
self.validate_version()
# infer the data kind
if not self.infer_axes():
return False
if where is not None:
raise TypeError("read_column does not currently accept a where "
"clause")
# find the axes
for a in self.axes:
if column == a.name:
if not a.is_data_indexable:
raise ValueError(
"column [%s] can not be extracted individually; it is "
"not data indexable" % column)
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
return Series(_set_tz(a.convert(c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors
).take_data(),
a.tz, True), name=column)
raise KeyError("column [%s] not found in the table" % column)
class WORMTable(Table):
""" a write-once read-many table: this format DOES NOT ALLOW appending to a
table. writing is a one-time operation; the data are stored in a format
that allows for searching the data on disk
"""
table_type = u('worm')
def read(self, **kwargs):
""" read the indices and the indexing array, calculate offset rows and
return """
raise NotImplementedError("WORMTable needs to implement read")
def write(self, **kwargs):
""" write in a format that we can search later on (but cannot append
to): write out the indices and the values using _write_array
(e.g. a CArray) create an indexing table so that we can search
"""
raise NotImplementedError("WORKTable needs to implement write")
class LegacyTable(Table):
""" an appendable table: allow append/query/delete operations to a
(possibly) already existing appendable table. this table ALLOWS
append (but doesn't require it), and stores the data in a format
that can be easily searched
"""
_indexables = [
IndexCol(name='index', axis=1, pos=0),
IndexCol(name='column', axis=2, pos=1, index_kind='columns_kind'),
DataCol(name='fields', cname='values', kind_attr='fields', pos=2)
]
table_type = u('legacy')
ndim = 3
def write(self, **kwargs):
raise TypeError("write operations are not allowed on legacy tables!")
def read(self, where=None, columns=None, **kwargs):
"""we have n indexable columns, with an arbitrary number of data
axes
"""
if not self.read_axes(where=where, **kwargs):
return None
lst_vals = [a.values for a in self.index_axes]
labels, levels = _factorize_from_iterables(lst_vals)
# labels and levels are tuples but lists are expected
labels = list(labels)
levels = list(levels)
N = [len(lvl) for lvl in levels]
# compute the key
key = _factor_indexer(N[1:], labels)
objs = []
if len(unique(key)) == len(key):
sorter, _ = algos.groupsort_indexer(
ensure_int64(key), np.prod(N))
sorter = ensure_platform_int(sorter)
# create the objs
for c in self.values_axes:
# the data need to be sorted
sorted_values = c.take_data().take(sorter, axis=0)
if sorted_values.ndim == 1:
sorted_values = sorted_values.reshape(
(sorted_values.shape[0], 1))
take_labels = [l.take(sorter) for l in labels]
items = Index(c.values)
block = _block2d_to_blocknd(
values=sorted_values, placement=np.arange(len(items)),
shape=tuple(N), labels=take_labels, ref_items=items)
# create the object
mgr = BlockManager([block], [items] + levels)
obj = self.obj_type(mgr)
# permute if needed
if self.is_transposed:
obj = obj.transpose(
*tuple(Series(self.data_orientation).argsort()))
objs.append(obj)
else:
warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5)
# reconstruct
long_index = MultiIndex.from_arrays(
[i.values for i in self.index_axes])
for c in self.values_axes:
lp = DataFrame(c.data, index=long_index, columns=c.values)
# need a better algorithm
tuple_index = long_index.values
unique_tuples = unique(tuple_index)
unique_tuples = com._asarray_tuplesafe(unique_tuples)
indexer = match(unique_tuples, tuple_index)
indexer = ensure_platform_int(indexer)
new_index = long_index.take(indexer)
new_values = lp.values.take(indexer, axis=0)
lp = DataFrame(new_values, index=new_index, columns=lp.columns)
objs.append(lp.to_panel())
# create the composite object
if len(objs) == 1:
wp = objs[0]
else:
wp = concat(objs, axis=0, verify_integrity=False)._consolidate()
# apply the selection filters & axis orderings
wp = self.process_axes(wp, columns=columns)
return wp
class LegacyFrameTable(LegacyTable):
""" support the legacy frame table """
pandas_kind = u('frame_table')
table_type = u('legacy_frame')
obj_type = Panel
def read(self, *args, **kwargs):
return super(LegacyFrameTable, self).read(*args, **kwargs)['value']
class LegacyPanelTable(LegacyTable):
""" support the legacy panel table """
table_type = u('legacy_panel')
obj_type = Panel
class AppendableTable(LegacyTable):
""" suppor the new appendable table formats """
_indexables = None
table_type = u('appendable')
def write(self, obj, axes=None, append=False, complib=None,
complevel=None, fletcher32=None, min_itemsize=None,
chunksize=None, expectedrows=None, dropna=False, **kwargs):
if not append and self.is_exists:
self._handle.remove_node(self.group, 'table')
# create the axes
self.create_axes(axes=axes, obj=obj, validate=append,
min_itemsize=min_itemsize,
**kwargs)
for a in self.axes:
a.validate(self, append)
if not self.is_exists:
# create the table
options = self.create_description(complib=complib,
complevel=complevel,
fletcher32=fletcher32,
expectedrows=expectedrows)
# set the table attributes
self.set_attrs()
# create the table
self._handle.create_table(self.group, **options)
else:
pass
# table = self.table
# update my info
self.set_info()
# validate the axes and set the kinds
for a in self.axes:
a.validate_and_set(self, append)
# add the rows
self.write_data(chunksize, dropna=dropna)
def write_data(self, chunksize, dropna=False):
""" we form the data into a 2-d including indexes,values,mask
write chunk-by-chunk """
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isna(a.data).all(axis=0)
if isinstance(mask, np.ndarray):
masks.append(mask.astype('u1', copy=False))
# consolidate masks
if len(masks):
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
bindexes = []
for i, idx in enumerate(indexes):
# broadcast to all other indexes except myself
if i > 0 and i < nindexes:
repeater = np.prod(
[indexes[bi].shape[0] for bi in range(0, i)])
idx = np.tile(idx, repeater)
if i < nindexes - 1:
repeater = np.prod([indexes[bi].shape[0]
for bi in range(i + 1, nindexes)])
idx = np.repeat(idx, repeater)
bindexes.append(idx)
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))
for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(values[i].reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in bindexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues])
def write_data_chunk(self, rows, indexes, mask, values):
"""
Parameters
----------
rows : an empty memory space where we are putting the chunk
indexes : an array of the indexes
mask : an array of the masks
values : an array of the values
"""
# 0 len
for v in values:
if not np.prod(v.shape):
return
try:
nrows = indexes[0].shape[0]
if nrows != len(rows):
rows = np.empty(nrows, dtype=self.dtype)
names = self.dtype.names
nindexes = len(indexes)
# indexes
for i, idx in enumerate(indexes):
rows[names[i]] = idx
# values
for i, v in enumerate(values):
rows[names[i + nindexes]] = v
# mask
if mask is not None:
m = ~mask.ravel().astype(bool, copy=False)
if not m.all():
rows = rows[m]
except Exception as detail:
raise Exception("cannot create row-data -> %s" % detail)
try:
if len(rows):
self.table.append(rows)
self.table.flush()
except Exception as detail:
raise TypeError("tables cannot write this data -> %s" % detail)
def delete(self, where=None, start=None, stop=None, **kwargs):
# delete all rows (and return the nrows)
if where is None or not len(where):
if start is None and stop is None:
nrows = self.nrows
self._handle.remove_node(self.group, recursive=True)
else:
# pytables<3.0 would remove a single row with stop=None
if stop is None:
stop = self.nrows
nrows = self.table.remove_rows(start=start, stop=stop)
self.table.flush()
return nrows
# infer the data kind
if not self.infer_axes():
return None
# create the selection
table = self.table
self.selection = Selection(
self, where, start=start, stop=stop, **kwargs)
values = self.selection.select_coords()
# delete the rows in reverse order
l = Series(values).sort_values()
ln = len(l)
if ln:
# construct groups of consecutive rows
diff = l.diff()
groups = list(diff[diff > 1].index)
# 1 group
if not len(groups):
groups = [0]
# final element
if groups[-1] != ln:
groups.append(ln)
# initial element
if groups[0] != 0:
groups.insert(0, 0)
# we must remove in reverse order!
pg = groups.pop()
for g in reversed(groups):
rows = l.take(lrange(g, pg))
table.remove_rows(start=rows[rows.index[0]
], stop=rows[rows.index[-1]] + 1)
pg = g
self.table.flush()
# return the number of rows removed
return ln
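# The broadcasting in write_data above forms the cartesian product of the index
# axes with np.repeat / np.tile. A minimal, hypothetical sketch of that pattern
# (plain numpy, not part of pandas):
def _broadcast_indexes_sketch():
    import numpy as np
    major = np.array([10, 20])      # first indexable axis
    minor = np.array([1, 2, 3])     # second indexable axis
    # earlier axes are repeated once per element of the later axes
    bmajor = np.repeat(major, minor.shape[0])   # [10 10 10 20 20 20]
    # later axes are tiled once per element of the earlier axes
    bminor = np.tile(minor, major.shape[0])     # [1 2 3 1 2 3]
    return bmajor, bminor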
class AppendableFrameTable(AppendableTable):
""" suppor the new appendable table formats """
pandas_kind = u('frame_table')
table_type = u('appendable_frame')
ndim = 2
obj_type = DataFrame
@property
def is_transposed(self):
return self.index_axes[0].axis == 1
def get_object(self, obj):
""" these are written transposed """
if self.is_transposed:
obj = obj.T
return obj
def read(self, where=None, columns=None, **kwargs):
if not self.read_axes(where=where, **kwargs):
return None
info = (self.info.get(self.non_index_axes[0][0], dict())
if len(self.non_index_axes) else dict())
index = self.index_axes[0].values
frames = []
for a in self.values_axes:
# we could have a multi-index constructor here
# ensure_index doesn't recognize our list-of-tuples here
if info.get('type') == 'MultiIndex':
cols = MultiIndex.from_tuples(a.values)
else:
cols = Index(a.values)
names = info.get('names')
if names is not None:
cols.set_names(names, inplace=True)
if self.is_transposed:
values = a.cvalues
index_ = cols
cols_ = Index(index, name=getattr(index, 'name', None))
else:
values = a.cvalues.T
index_ = Index(index, name=getattr(index, 'name', None))
cols_ = cols
# if we have a DataIndexableCol, its shape will only be 1 dim
if values.ndim == 1 and isinstance(values, np.ndarray):
values = values.reshape((1, values.shape[0]))
block = make_block(values, placement=np.arange(len(cols_)))
mgr = BlockManager([block], [cols_, index_])
frames.append(DataFrame(mgr))
if len(frames) == 1:
df = frames[0]
else:
df = concat(frames, axis=1)
# apply the selection filters & axis orderings
df = self.process_axes(df, columns=columns)
return df
class AppendableSeriesTable(AppendableFrameTable):
""" support the new appendable table formats """
pandas_kind = u('series_table')
table_type = u('appendable_series')
ndim = 2
obj_type = Series
storage_obj_type = DataFrame
@property
def is_transposed(self):
return False
def get_object(self, obj):
return obj
def write(self, obj, data_columns=None, **kwargs):
""" we are going to write this as a frame table """
if not isinstance(obj, DataFrame):
name = obj.name or 'values'
obj = DataFrame({name: obj}, index=obj.index)
obj.columns = [name]
return super(AppendableSeriesTable, self).write(
obj=obj, data_columns=obj.columns.tolist(), **kwargs)
def read(self, columns=None, **kwargs):
is_multi_index = self.is_multi_index
if columns is not None and is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
s = super(AppendableSeriesTable, self).read(columns=columns, **kwargs)
if is_multi_index:
s.set_index(self.levels, inplace=True)
s = s.iloc[:, 0]
# remove the default name
if s.name == 'values':
s.name = None
return s
class AppendableMultiSeriesTable(AppendableSeriesTable):
""" support the new appendable table formats """
pandas_kind = u('series_table')
table_type = u('appendable_multiseries')
def write(self, obj, **kwargs):
""" we are going to write this as a frame table """
name = obj.name or 'values'
obj, self.levels = self.validate_multiindex(obj)
cols = list(self.levels)
cols.append(name)
obj.columns = cols
return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)
class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
pandas_kind = u('frame_table')
table_type = u('generic_table')
ndim = 2
obj_type = DataFrame
@property
def pandas_type(self):
return self.pandas_kind
@property
def storable(self):
return getattr(self.group, 'table', None) or self.group
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = []
self.nan_rep = None
self.levels = []
self.index_axes = [a.infer(self)
for a in self.indexables if a.is_an_indexable]
self.values_axes = [a.infer(self)
for a in self.indexables if not a.is_an_indexable]
self.data_columns = [a.name for a in self.values_axes]
@property
def indexables(self):
""" create the indexables from the table description """
if self._indexables is None:
d = self.description
# the index columns is just a simple index
self._indexables = [GenericIndexCol(name='index', axis=0)]
for i, n in enumerate(d._v_names):
dc = GenericDataIndexableCol(
name=n, pos=i, values=[n], version=self.version)
self._indexables.append(dc)
return self._indexables
def write(self, **kwargs):
raise NotImplementedError("cannot write on a generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
""" a frame with a multi-index """
table_type = u('appendable_multiframe')
obj_type = DataFrame
ndim = 2
_re_levels = re.compile(r"^level_\d+$")
@property
def table_type_short(self):
return u('appendable_multi')
def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
data_columns = []
elif data_columns is True:
data_columns = obj.columns.tolist()
obj, self.levels = self.validate_multiindex(obj)
for n in self.levels:
if n not in data_columns:
data_columns.insert(0, n)
return super(AppendableMultiFrameTable, self).write(
obj=obj, data_columns=data_columns, **kwargs)
def read(self, **kwargs):
df = super(AppendableMultiFrameTable, self).read(**kwargs)
df = df.set_index(self.levels)
# remove names for 'level_%d'
df.index = df.index.set_names([
None if self._re_levels.search(l) else l for l in df.index.names
])
return df
class AppendablePanelTable(AppendableTable):
""" suppor the new appendable table formats """
table_type = u('appendable_panel')
ndim = 3
obj_type = Panel
def get_object(self, obj):
""" these are written transposed """
if self.is_transposed:
obj = obj.transpose(*self.data_orientation)
return obj
@property
def is_transposed(self):
return self.data_orientation != tuple(range(self.ndim))
def _reindex_axis(obj, axis, labels, other=None):
ax = obj._get_axis(axis)
labels = ensure_index(labels)
# try not to reindex even if other is provided
# if it equals our current index
if other is not None:
other = ensure_index(other)
if (other is None or labels.equals(other)) and labels.equals(ax):
return obj
labels = ensure_index(labels.unique())
if other is not None:
labels = ensure_index(other.unique()) & labels
if not labels.equals(ax):
slicer = [slice(None, None)] * obj.ndim
slicer[axis] = labels
obj = obj.loc[tuple(slicer)]
return obj
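# A minimal, hedged sketch (illustrative only) of what _reindex_axis amounts to for
# a DataFrame: when the requested labels differ from the current axis, the object is
# sliced with .loc along that axis.
def _reindex_axis_sketch():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame(np.arange(6).reshape(3, 2),
                      index=list("abc"), columns=["x", "y"])
    # roughly equivalent to _reindex_axis(df, axis=0, labels=["a", "c"])
    return df.loc[["a", "c"], :]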
def _get_info(info, name):
""" get/create the info for this name """
try:
idx = info[name]
except:
idx = info[name] = dict()
return idx
# tz to/from coercion
def _get_tz(tz):
""" for a tz-aware type, return an encoded zone """
zone = timezones.get_timezone(tz)
if zone is None:
zone = tz.utcoffset().total_seconds()
return zone
def _set_tz(values, tz, preserve_UTC=False, coerce=False):
"""
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
Parameters
----------
values : ndarray
tz : string/pickled tz object
preserve_UTC : boolean,
preserve the UTC of the result
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
"""
if tz is not None:
name = getattr(values, 'name', None)
values = values.ravel()
tz = timezones.get_timezone(_ensure_decoded(tz))
values = DatetimeIndex(values, name=name)
if values.tz is None:
values = values.tz_localize('UTC').tz_convert(tz)
if preserve_UTC:
if tz == 'UTC':
values = list(values)
elif coerce:
values = np.asarray(values, dtype='M8[ns]')
return values
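# Hedged sketch of the timezone round-trip that _set_tz performs on read: stored
# naive datetime values are interpreted as UTC and then converted to the saved zone.
def _set_tz_sketch():
    import pandas as pd
    naive = pd.DatetimeIndex(["2018-01-01 12:00", "2018-01-02 12:00"])
    return naive.tz_localize("UTC").tz_convert("US/Eastern")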
def _convert_index(index, encoding=None, errors='strict', format_type=None):
index_name = getattr(index, 'name', None)
if isinstance(index, DatetimeIndex):
converted = index.asi8
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
tz=getattr(index, 'tz', None),
index_name=index_name)
elif isinstance(index, TimedeltaIndex):
converted = index.asi8
return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
index_name=index_name)
elif isinstance(index, (Int64Index, PeriodIndex)):
atom = _tables().Int64Col()
# avoid storing an ndarray of Period objects
return IndexCol(index._ndarray_values, 'integer', atom,
freq=getattr(index, 'freq', None),
index_name=index_name)
if isinstance(index, MultiIndex):
raise TypeError('MultiIndex not supported here!')
inferred_type = lib.infer_dtype(index)
values = np.asarray(index)
if inferred_type == 'datetime64':
converted = values.view('i8')
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
tz=getattr(index, 'tz', None),
index_name=index_name)
elif inferred_type == 'timedelta64':
converted = values.view('i8')
return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
index_name=index_name)
elif inferred_type == 'datetime':
converted = np.asarray([(time.mktime(v.timetuple()) +
v.microsecond / 1E6) for v in values],
dtype=np.float64)
return IndexCol(converted, 'datetime', _tables().Time64Col(),
index_name=index_name)
elif inferred_type == 'date':
converted = np.asarray([v.toordinal() for v in values],
dtype=np.int32)
return IndexCol(converted, 'date', _tables().Time32Col(),
index_name=index_name)
elif inferred_type == 'string':
# atom = _tables().ObjectAtom()
# return np.asarray(values, dtype='O'), 'object', atom
converted = _convert_string_array(values, encoding, errors)
itemsize = converted.dtype.itemsize
return IndexCol(
converted, 'string', _tables().StringCol(itemsize),
itemsize=itemsize, index_name=index_name
)
elif inferred_type == 'unicode':
if format_type == 'fixed':
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
raise TypeError(
"[unicode] is not supported as a in index type for [{0}] formats"
.format(format_type)
)
elif inferred_type == 'integer':
# take a guess for now, hope the values fit
atom = _tables().Int64Col()
return IndexCol(np.asarray(values, dtype=np.int64), 'integer', atom,
index_name=index_name)
elif inferred_type == 'floating':
atom = _tables().Float64Col()
return IndexCol(np.asarray(values, dtype=np.float64), 'float', atom,
index_name=index_name)
else: # pragma: no cover
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
def _unconvert_index(data, kind, encoding=None, errors='strict'):
kind = _ensure_decoded(kind)
if kind == u('datetime64'):
index = DatetimeIndex(data)
elif kind == u('timedelta64'):
index = TimedeltaIndex(data)
elif kind == u('datetime'):
index = np.asarray([datetime.fromtimestamp(v) for v in data],
dtype=object)
elif kind == u('date'):
try:
index = np.asarray(
[date.fromordinal(v) for v in data], dtype=object)
except (ValueError):
index = np.asarray(
[date.fromtimestamp(v) for v in data], dtype=object)
elif kind in (u('integer'), u('float')):
index = np.asarray(data)
elif kind in (u('string')):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding,
errors=errors)
elif kind == u('object'):
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
def _unconvert_index_legacy(data, kind, legacy=False, encoding=None,
errors='strict'):
kind = _ensure_decoded(kind)
if kind == u('datetime'):
index = to_datetime(data)
elif kind in (u('integer')):
index = np.asarray(data, dtype=object)
elif kind in (u('string')):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding,
errors=errors)
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
def _convert_string_array(data, encoding, errors, itemsize=None):
"""
we take a string-like array of object dtype and coerce it to a
fixed-size string type
Parameters
----------
data : a numpy array of object dtype
encoding : None or string-encoding
errors : handler for encoding errors
itemsize : integer, optional, defaults to the max length of the strings
Returns
-------
data in a fixed-length string dtype, encoded to bytes if needed
"""
# encode if needed
if encoding is not None and len(data):
data = Series(data.ravel()).str.encode(
encoding, errors).values.reshape(data.shape)
# create the sized dtype
if itemsize is None:
ensured = ensure_object(data.ravel())
itemsize = libwriters.max_len_string_array(ensured)
data = np.asarray(data, dtype="S%d" % itemsize)
return data
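# A hypothetical sketch of the coercion step (plain numpy, not the helper above):
# an object array of strings is sized to its longest element and stored as a
# fixed-width byte dtype, which is what _convert_string_array produces.
def _fixed_width_string_sketch():
    import numpy as np
    data = np.array(["a", "bbb", "cc"], dtype=object)
    itemsize = max(len(s) for s in data)
    return np.asarray(data, dtype="S%d" % itemsize)   # dtype('S3')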
def _unconvert_string_array(data, nan_rep=None, encoding=None,
errors='strict'):
"""
inverse of _convert_string_array
Parameters
----------
data : fixed length string dtyped array
nan_rep : the storage repr of NaN, optional
encoding : the encoding of the data, optional
errors : handler for encoding errors, default 'strict'
Returns
-------
an object array of the decoded data
"""
shape = data.shape
data = np.asarray(data.ravel(), dtype=object)
# guard against a None encoding in PY3 (because of a legacy
# where the passed encoding is actually None)
encoding = _ensure_encoding(encoding)
if encoding is not None and len(data):
itemsize = libwriters.max_len_string_array(ensure_object(data))
if compat.PY3:
dtype = "U{0}".format(itemsize)
else:
dtype = "S{0}".format(itemsize)
if isinstance(data[0], compat.binary_type):
data = Series(data).str.decode(encoding, errors=errors).values
else:
data = data.astype(dtype, copy=False).astype(object, copy=False)
if nan_rep is None:
nan_rep = 'nan'
data = libwriters.string_array_replace_from_nan_rep(data, nan_rep)
return data.reshape(shape)
def _maybe_convert(values, val_kind, encoding, errors):
if _need_convert(val_kind):
conv = _get_converter(val_kind, encoding, errors)
# conv = np.frompyfunc(conv, 1, 1)
values = conv(values)
return values
def _get_converter(kind, encoding, errors):
kind = _ensure_decoded(kind)
if kind == 'datetime64':
return lambda x: np.asarray(x, dtype='M8[ns]')
elif kind == 'datetime':
return lambda x: to_datetime(x, cache=True).to_pydatetime()
elif kind == 'string':
return lambda x: _unconvert_string_array(x, encoding=encoding,
errors=errors)
else: # pragma: no cover
raise ValueError('invalid kind %s' % kind)
def _need_convert(kind):
kind = _ensure_decoded(kind)
if kind in (u('datetime'), u('datetime64'), u('string')):
return True
return False
class Selection(object):
"""
Carries out a selection operation on a tables.Table object.
Parameters
----------
table : a Table object
where : list of Terms (or convertible to)
start, stop: indices to start and/or stop selection
"""
def __init__(self, table, where=None, start=None, stop=None, **kwargs):
self.table = table
self.where = where
self.start = start
self.stop = stop
self.condition = None
self.filter = None
self.terms = None
self.coordinates = None
if is_list_like(where):
# see if we have a passed coordinate like
try:
inferred = lib.infer_dtype(where)
if inferred == 'integer' or inferred == 'boolean':
where = np.asarray(where)
if where.dtype == np.bool_:
start, stop = self.start, self.stop
if start is None:
start = 0
if stop is None:
stop = self.table.nrows
self.coordinates = np.arange(start, stop)[where]
elif issubclass(where.dtype.type, np.integer):
if ((self.start is not None and
(where < self.start).any()) or
(self.stop is not None and
(where >= self.stop).any())):
raise ValueError(
"where must have index locations >= start and "
"< stop"
)
self.coordinates = where
except:
pass
if self.coordinates is None:
self.terms = self.generate(where)
# create the numexpr & the filter
if self.terms is not None:
self.condition, self.filter = self.terms.evaluate()
def generate(self, where):
""" where can be a : dict,list,tuple,string """
if where is None:
return None
q = self.table.queryables()
try:
return Expr(where, queryables=q, encoding=self.table.encoding)
except NameError:
# raise a nice message, suggesting that the user should use
# data_columns
raise ValueError(
"The passed where expression: {0}\n"
" contains an invalid variable reference\n"
" all of the variable references must be a "
"reference to\n"
" an axis (e.g. 'index' or 'columns'), or a "
"data_column\n"
" The currently defined references are: {1}\n"
.format(where, ','.join(q.keys()))
)
def select(self):
"""
generate the selection
"""
if self.condition is not None:
return self.table.table.read_where(self.condition.format(),
start=self.start,
stop=self.stop)
elif self.coordinates is not None:
return self.table.table.read_coordinates(self.coordinates)
return self.table.table.read(start=self.start, stop=self.stop)
def select_coords(self):
"""
generate the selection
"""
start, stop = self.start, self.stop
nrows = self.table.nrows
if start is None:
start = 0
elif start < 0:
start += nrows
if self.stop is None:
stop = nrows
elif stop < 0:
stop += nrows
if self.condition is not None:
return self.table.table.get_where_list(self.condition.format(),
start=start, stop=stop,
sort=True)
elif self.coordinates is not None:
return self.coordinates
return np.arange(start, stop)
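# Hedged sketch of the bound normalization done in Selection.select_coords above:
# negative start/stop are interpreted relative to the table's row count, like
# Python slice bounds.
def _normalize_bounds_sketch(nrows=10, start=-3, stop=None):
    if start is None:
        start = 0
    elif start < 0:
        start += nrows
    if stop is None:
        stop = nrows
    elif stop < 0:
        stop += nrows
    return start, stop   # (7, 10) for the defaults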
# utilities ###
def timeit(key, df, fn=None, remove=True, **kwargs):
if fn is None:
fn = 'timeit.h5'
store = HDFStore(fn, mode='w')
store.append(key, df, **kwargs)
store.close()
if remove:
os.remove(fn)
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
NOTE(mrry): This fork of the `tensorflow.python.util.nest` module
makes two changes:
1. It adds support for dictionaries as a level of nesting in nested structures.
2. It removes support for lists as a level of nesting in nested structures.
The motivation for this change is twofold:
1. Many input-processing functions (e.g. `tf.parse_example()`) return
dictionaries, and we would like to support them natively in datasets.
2. It seems more natural for lists to be treated (e.g. in Dataset constructors)
as tensors, rather than lists of (lists of...) tensors.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
from tensorflow.python.util.all_util import remove_undocumented
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, or a `namedtuple` class.
args: elements to be converted to a sequence.
Returns:
`args` with the type of `instance`.
"""
if isinstance(instance, dict):
# This is a dict. Iterate over the keys in sorted order to make
# this deterministic.
return {k: v for k, v in zip(sorted(instance.keys()), args)}
elif (isinstance(instance, tuple) and
hasattr(instance, "_fields") and
isinstance(instance._fields, _collections.Sequence) and
all(isinstance(f, _six.string_types) for f in instance._fields)):
# This is a namedtuple
return type(instance)(*args)
else:
# Not a namedtuple
return type(instance)(args)
def _elements_of(nest):
if isinstance(nest, dict):
# Iterate over dict keys in sorted order to make this deterministic.
return [v for _, v in sorted(nest.items())]
else:
return nest
def _yield_flat_nest(nest):
for n in _elements_of(nest):
if is_sequence(n):
for ni in _yield_flat_nest(n):
yield ni
else:
yield n
def is_sequence(seq):
"""Returns a true if `seq` is a Sequence or dict (except strings/lists).
NOTE(mrry): This differs from `tensorflow.python.util.nest.is_sequence()`,
which *does* treat a Python list as a sequence. For ergonomic
reasons, `tf.contrib.data` users would prefer to treat lists as
implicit `tf.Tensor` objects, and dicts as (nested) sequences.
Args:
seq: an input sequence.
Returns:
True if `seq` is a collections.Sequence or dict, and is not a string
or list.
"""
return (isinstance(seq, (_collections.Sequence, dict))
and not isinstance(seq, (list, _six.string_types)))
def flatten(nest):
"""Returns a flat sequence from a given nested structure.
If `nest` is not a sequence, this returns a single-element list: `[nest]`.
Args:
nest: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the flattened version of the input.
"""
return list(_yield_flat_nest(nest)) if is_sequence(nest) else [nest]
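# Illustrative sketch (not part of the original module): this fork flattens dicts
# (in sorted-key order) and tuples, but treats lists and strings as scalars.
def _flatten_sketch():
  assert flatten({"b": (2, 3), "a": 1}) == [1, 2, 3]
  assert flatten((1, (2, 3))) == [1, 2, 3]
  assert flatten([1, 2, 3]) == [[1, 2, 3]]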
def _recursive_assert_same_structure(nest1, nest2, check_types):
is_sequence_nest1 = is_sequence(nest1)
if is_sequence_nest1 != is_sequence(nest2):
raise ValueError(
"The two structures don't have the same nested structure. "
"First structure: %s, second structure: %s." % (nest1, nest2))
if is_sequence_nest1:
type_nest1 = type(nest1)
type_nest2 = type(nest2)
if check_types and type_nest1 != type_nest2:
raise TypeError(
"The two structures don't have the same sequence type. First "
"structure has type %s, while second structure has type %s."
% (type_nest1, type_nest2))
for n1, n2 in zip(_elements_of(nest1), _elements_of(nest2)):
_recursive_assert_same_structure(n1, n2, check_types)
def assert_same_structure(nest1, nest2, check_types=True):
"""Asserts that two structures are nested in the same way.
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
check_types: if `True` (default) types of sequences are checked as
well. If set to `False`, for example a list and a tuple of objects will
look the same if they have the same size.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
len_nest1 = len(flatten(nest1)) if is_sequence(nest1) else 1
len_nest2 = len(flatten(nest2)) if is_sequence(nest2) else 1
if len_nest1 != len_nest2:
raise ValueError("The two structures don't have the same number of "
"elements. First structure: %s, second structure: %s."
% (nest1, nest2))
_recursive_assert_same_structure(nest1, nest2, check_types)
def _packed_nest_with_indices(structure, flat, index):
"""Helper function for pack_nest_as.
Args:
structure: Substructure (tuple of elements and/or tuples) to mimic
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in structure:
if is_sequence(s):
new_index, child = _packed_nest_with_indices(s, flat, index)
packed.append(_sequence_like(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a nest.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
Args:
structure: tuple or list constructed of scalars and/or other tuples/lists,
or a scalar. Note: numpy arrays are considered scalars.
flat_sequence: flat sequence to pack.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If nest and structure have different element counts.
"""
if not (is_sequence(flat_sequence) or isinstance(flat_sequence, list)):
raise TypeError("flat_sequence must be a sequence")
if not is_sequence(structure):
if len(flat_sequence) != 1:
raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
% len(flat_sequence))
return flat_sequence[0]
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but flat_sequence "
"had %d elements. Structure: %s, flat_sequence: %s."
% (len(flat_structure), len(flat_sequence), structure, flat_sequence))
_, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
return _sequence_like(structure, packed)
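# Hedged usage sketch: pack_sequence_as is the inverse of flatten for the nesting
# rules above.
def _pack_sequence_as_sketch():
  structure = ("x", ("y", "z"))
  assert pack_sequence_as(structure, [1, 2, 3]) == (1, (2, 3))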
def map_structure(func, *structure, **check_types_dict):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain the results in the same structure.
Args:
func: A callable that accepts as many arguments as there are structures.
*structure: scalar, or tuple or list of constructed scalars and/or other
tuples/lists, or scalars. Note: numpy arrays are considered scalars.
**check_types_dict: only valid keyword argument is `check_types`. If set to
`True` (default) the types of iterables within the structures have to be
the same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this, set this argument to `False`.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
if check_types_dict:
if "check_types" not in check_types_dict or len(check_types_dict) > 1:
raise ValueError("Only valid keyword argument is check_types")
check_types = check_types_dict["check_types"]
else:
check_types = True
for other in structure[1:]:
assert_same_structure(structure[0], other, check_types=check_types)
flat_structure = [flatten(s) for s in structure]
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries])
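# Hedged usage sketch: map_structure applies the callable element-wise and repacks
# the result with the structure of the first argument.
def _map_structure_sketch():
  out = map_structure(lambda a, b: a + b, (1, {"k": 2}), (10, {"k": 20}))
  assert out == (11, {"k": 22})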
def _yield_flat_up_to(shallow_tree, input_tree):
"""Yields elements `input_tree` partially flattened up to `shallow_tree`."""
if is_sequence(shallow_tree):
for shallow_branch, input_branch in zip(_elements_of(shallow_tree),
_elements_of(input_tree)):
for input_leaf in _yield_flat_up_to(shallow_branch, input_branch):
yield input_leaf
else:
yield input_tree
def assert_shallow_structure(shallow_tree, input_tree, check_types=True):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will not raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"]]
assert_shallow_structure(shallow_tree, input_tree)
```
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
if is_sequence(shallow_tree):
if not is_sequence(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if check_types and not isinstance(input_tree, type(shallow_tree)):
raise TypeError(
"The two structures don't have the same sequence type. Input "
"structure has type %s, while shallow structure has type %s."
% (type(input_tree), type(shallow_tree)))
if len(input_tree) != len(shallow_tree):
raise ValueError(
"The two structures don't have the same sequence length. Input "
"structure has length %s, while shallow structure has length %s."
% (len(input_tree), len(shallow_tree)))
for shallow_branch, input_branch in zip(shallow_tree, input_tree):
assert_shallow_structure(shallow_branch, input_branch,
check_types=check_types)
def flatten_up_to(shallow_tree, input_tree):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flattened output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
assert_shallow_structure(shallow_tree, input_tree)
return list(_yield_flat_up_to(shallow_tree, input_tree))
def map_structure_up_to(shallow_tree, func, *inputs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
achieve this by specifying a shallow structure, `shallow_tree` we wish to
flatten up to.
The `inputs` can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function therefore will return something with the same base structure as
`shallow_tree`.
Examples:
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with same structure as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
for input_tree in inputs:
assert_shallow_structure(shallow_tree, input_tree)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
all_flattened_up_to = [flatten_up_to(shallow_tree, input_tree)
for input_tree in inputs]
results = [func(*tensors) for tensors in zip(*all_flattened_up_to)]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results)
_allowed_symbols = [
"assert_same_structure",
"is_sequence",
"flatten",
"pack_sequence_as",
"map_structure",
"assert_shallow_structure",
"flatten_up_to",
"map_structure_up_to",
]
remove_undocumented(__name__, _allowed_symbols)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import time
from random import choice
import string
import os
import datetime
import socket
from openerp import tools
from openerp.modules.module import get_module_resource
from openerp.osv import fields, osv
import openerp.report
from openerp.tools.translate import _
class survey_send_invitation(osv.osv_memory):
_name = 'survey.send.invitation'
_columns = {
'partner_ids': fields.many2many('res.partner','survey_res_partner','partner_id',\
'survey_id', "Answer", required=1),
'send_mail': fields.boolean('Send Mail for New User'),
'send_mail_existing': fields.boolean('Send Reminder for Existing User'),
'mail_subject': fields.char('Subject', size=256),
'mail_subject_existing': fields.char('Subject', size=256),
'mail_from': fields.char('From', size=256, required=1),
'mail': fields.text('Body')
}
_defaults = {
'send_mail': lambda *a: 1,
'send_mail_existing': lambda *a: 1,
}
def genpasswd(self):
chars = string.letters + string.digits
return ''.join([choice(chars) for i in range(6)])
def default_get(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
data = super(survey_send_invitation, self).default_get(cr, uid, fields_list, context)
survey_obj = self.pool.get('survey')
msg = ""
name = ""
for sur in survey_obj.browse(cr, uid, context.get('active_ids', []), context=context):
name += "\n --> " + sur.title + "\n"
if sur.state != 'open':
msg += sur.title + "\n"
data['mail_subject'] = _("Invitation for %s") % (sur.title)
data['mail_subject_existing'] = _("Invitation for %s") % (sur.title)
data['mail_from'] = sur.responsible_id.email
if msg:
raise osv.except_osv(_('Warning!'), _('The following surveys are not in open state: %s') % msg)
data['mail'] = _('''
Hello %%(name)s, \n\n
Would you please spend some of your time to fill in our survey: \n%s\n
You can access this survey with the following parameters:
URL: %s
Your login ID: %%(login)s\n
Your password: %%(passwd)s\n
\n\n
Thanks,''') % (name, self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='http://localhost:8069', context=context))
return data
def create_report(self, cr, uid, res_ids, report_name=False, file_name=False):
if not report_name or not res_ids:
return (False, Exception('Report name and resource ids are required!'))
try:
ret_file_name = get_module_resource('survey', 'report') + file_name + '.pdf'
result, format = openerp.report.render_report(cr, uid, res_ids, report_name[len('report.'):], {}, {})
fp = open(ret_file_name, 'wb+')
fp.write(result)
fp.close()
except Exception,e:
print 'Exception in create report:',e
return (False, str(e))
return (True, ret_file_name)
def action_send(self, cr, uid, ids, context=None):
if context is None:
context = {}
record = self.read(cr, uid, ids, [],context=context)
survey_ids = context.get('active_ids', [])
record = record and record[0]
partner_ids = record['partner_ids']
user_ref= self.pool.get('res.users')
survey_ref= self.pool.get('survey')
mail_message = self.pool.get('mail.message')
model_data_obj = self.pool.get('ir.model.data')
group_id = model_data_obj._get_id(cr, uid, 'base', 'group_survey_user')
group_id = model_data_obj.browse(cr, uid, group_id, context=context).res_id
act_id = self.pool.get('ir.actions.act_window')
act_id = act_id.search(cr, uid, [('res_model', '=' , 'survey.name.wiz'), \
('view_type', '=', 'form')])
out = "login,password\n"
skipped = 0
existing = ""
created = ""
error = ""
new_user = []
attachments = {}
current_sur = survey_ref.browse(cr, uid, context.get('active_id'), context=context)
exist_user = current_sur.invited_user_ids
if exist_user:
for use in exist_user:
new_user.append(use.id)
for id in survey_ref.browse(cr, uid, survey_ids):
report = self.create_report(cr, uid, [id.id], 'report.survey.form', id.title)
file = open(get_module_resource('survey', 'report') + id.title +".pdf")
file_data = ""
while 1:
line = file.readline()
file_data += line
if not line:
break
file.close()
attachments[id.title +".pdf"] = file_data
os.remove(get_module_resource('survey', 'report') + id.title +".pdf")
for partner in self.pool.get('res.partner').browse(cr, uid, partner_ids):
if not partner.email:
skipped+= 1
continue
user = user_ref.search(cr, uid, [('login', "=", partner.email)])
if user:
if user[0] not in new_user:
new_user.append(user[0])
user = user_ref.browse(cr, uid, user[0])
user_ref.write(cr, uid, user.id, {'survey_id':[[6, 0, survey_ids]]})
mail = record['mail']%{'login':partner.email, 'passwd':user.password, \
'name' : partner.name}
if record['send_mail_existing']:
vals = {
'state': 'outgoing',
'subject': record['mail_subject_existing'],
'body_html': '<pre>%s</pre>' % mail,
'email_to': partner.email,
'email_from': record['mail_from'],
}
self.pool.get('mail.mail').create(cr, uid, vals, context=context)
existing+= "- %s (Login: %s, Password: %s)\n" % (user.name, partner.email, \
user.password)
continue
passwd= self.genpasswd()
out+= partner.email + ',' + passwd + '\n'
mail= record['mail'] % {'login' : partner.email, 'passwd' : passwd, 'name' : partner.name}
if record['send_mail']:
vals = {
'state': 'outgoing',
'subject': record['mail_subject'],
'body_html': '<pre>%s</pre>' % mail,
'email_to': partner.email,
'email_from': record['mail_from'],
}
if attachments:
vals['attachment_ids'] = [(0,0,{'name': a_name,
'datas_fname': a_name,
'datas': str(a_content).encode('base64')})
for a_name, a_content in attachments.items()]
ans = self.pool.get('mail.mail').create(cr, uid, vals, context=context)
if ans:
res_data = {'name': partner.name or _('Unknown'),
'login': partner.email,
'password': passwd,
'address_id': partner.id,
'groups_id': [[6, 0, [group_id]]],
'action_id': act_id[0],
'survey_id': [[6, 0, survey_ids]]
}
create_ctx = dict(context, no_reset_password=True)
user = user_ref.create(cr, uid, res_data, context=create_ctx)
if user not in new_user:
new_user.append(user)
created+= "- %s (Login: %s, Password: %s)\n" % (partner.name or _('Unknown'),\
partner.email, passwd)
else:
error+= "- %s (Login: %s, Password: %s)\n" % (partner.name or _('Unknown'),\
partner.email, passwd)
new_vals = {}
new_vals.update({'invited_user_ids':[[6,0,new_user]]})
survey_ref.write(cr, uid, context.get('active_id'),new_vals)
note= ""
if created:
note += 'Created users:\n%s\n\n' % (created)
if existing:
note +='Already existing users:\n%s\n\n' % (existing)
if skipped:
note += "%d contacts where ignored (an email address is missing).\n\n" % (skipped)
if error:
note += 'Emails not sent successfully:\n====================\n%s\n' % (error)
context.update({'note' : note})
return {
'view_type': 'form',
"view_mode": 'form',
'res_model': 'survey.send.invitation.log',
'type': 'ir.actions.act_window',
'target': 'new',
'context': context
}
class survey_send_invitation_log(osv.osv_memory):
_name = 'survey.send.invitation.log'
_columns = {
'note' : fields.text('Log', readonly=1)
}
def default_get(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
data = super(survey_send_invitation_log, self).default_get(cr, uid, fields_list, context)
data['note'] = context.get('note', '')
return data
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
import json
import escher
import os
import logging
import sys
import networkx
from cobra.core import Metabolite
LOGGER = logging.getLogger(__name__)
ESCHER_OPTIONS_WEB = {"js_source": "web",
"menu": "none",
"scroll_behavior": "zoom",
"html_wrapper": True,
"protocol": "https"}
ESCHER_OPTIONS_LOCAL = ESCHER_OPTIONS_WEB.copy()
ESCHER_OPTIONS_LOCAL["js_source"] = "local"
class WrongEscherFormat(BaseException):
def __init__(self, *args):
super(WrongEscherFormat, self).__init__(*args)
class MapWrapper:
""" Wrapper for loaded map """
def __init__(self, map_json=None, path=None):
self._map_json = None
self._file_path = None
self._reaction_ids = set()
self.set_map_json(map_json, path)
def set_map_json(self, map_json, path):
""" Set the map json
Parameters
----------
map_json: str, String containing an escher map
path: str, Path to the map file
Returns
-------
None
"""
self._map_json = map_json
self._file_path = path
self._parse_json()
def _parse_json(self):
""" Parse map json and populate reaction ids
Raises
------
JSONDecodeError
If the map json is not a valid JSON file
WrongEscherFormat
If there is a problem while parsing JSON
"""
parsed = json.loads(self._map_json)
try:
node = parsed[1]
reactions_dict = node["reactions"]
reaction_ids = set(v["bigg_id"] for v in reactions_dict.values())
except:
tb = sys.exc_info()[2]
raise WrongEscherFormat("Error parsing reaction ids").with_traceback(tb)
else:
self._reaction_ids = reaction_ids
def get_html(self, reaction_data=None, gene_data=None, metabolite_data=None):
""" Generate the html from map
Parameters
----------
reaction_data: dict
metabolite_data: dict
gene_data: dict
Returns
-------
map_html: str
"""
builder = escher.Builder(map_json=self._map_json,
reaction_data=reaction_data,
gene_data=gene_data,
metabolite_data=metabolite_data)
return builder._get_html(**ESCHER_OPTIONS_WEB)
@property
def display_path(self):
if isinstance(self._file_path, str):
return os.path.basename(self._file_path)
else:
return str(self._file_path)
def __contains__(self, item):
if hasattr(item, "id"):
return item.id in self._reaction_ids
elif isinstance(item, str):
return item in self._reaction_ids
else:
return False
class MapGraph(networkx.Graph):
def __init__(self, *args, **kwargs):
super(MapGraph, self).__init__(*args, **kwargs)
@property
def reactions(self):
return set(t[0] for t in self.nodes() if isinstance(t, tuple))
def replace_css_paths(html):
escher_path = os.path.dirname(escher.__file__)
full_path = "file://"+escher_path + "/static"
replaced = html.replace('escher/static', full_path.replace("\\", "/"))
return replaced
def canvas_size(positions, params):
""" Calculate the canvas size from node positions
Parameters
----------
positions: dict,
Dictionary containing all positions of the nodes
params: dict,
Dictionary containing the mapping parameters
Returns
-------
width: float or int,
Canvas width
height: float or int,
Canvas height
"""
width = max(p[0] for p in positions.values()) + 2 * params["x_margin"]
height = max(p[1] for p in positions.values()) + 2 * params["y_margin"]
return width, height
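# Hedged usage sketch (hypothetical numbers): the canvas is the bounding box of the
# node positions padded by the margins on both sides.
def _canvas_size_sketch():
    positions = {"A": (100, 50), "B": (300, 200)}
    params = {"x_margin": 20, "y_margin": 10}
    assert canvas_size(positions, params) == (340, 220)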
def get_subnodes(reaction):
""" Get all the subnodes for a reaction
Parameters
----------
reaction :
Returns
-------
"""
return (reaction, "educts"), (reaction, "middle"), (reaction, "products")
def add_subnodes(graph, reaction):
""" NetworkX graph
Parameters
----------
graph: networkx.Graph,
Graph containing the map nodes
reaction:
Reaction to be plotted
Returns
-------
"""
educt_node, middle_node, product_node = get_subnodes(reaction)
graph.add_nodes_from([educt_node, middle_node, product_node])
graph.add_edge(educt_node, middle_node)
graph.add_edge(middle_node, product_node)
return educt_node, middle_node, product_node
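# Illustrative sketch (a plain string stands in for a cobra Reaction here):
# add_subnodes registers the educts/middle/products helper nodes and chains them.
def _add_subnodes_sketch():
    graph = MapGraph()
    educts, middle, products = add_subnodes(graph, "R1")
    assert middle == ("R1", "middle")
    assert graph.has_edge(educts, middle) and graph.has_edge(middle, products)
    assert graph.reactions == {"R1"}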
def entry_from_metabolite_node(node, x, y):
""" Generate a metabolite entry for use in Escher map
Nodes are expected to be of the form:
<Metabolite>
(<Reaction>, <Metabolite>)
(<Reaction>, "middle")
(<Reaction>, "educts")
(<Reaction>, "products")
Parameters
----------
node: Metabolite or tuple,
The node for which to generate an entry
x: float or int,
x position of the metabolite on the map
y: float or int,
y position of the metabolite on the map
Returns
-------
dict,
Metabolite entry
"""
entry = {"x": x, "y": y}
def add_metabolite_info(metabolite, is_primary):
label_offset = 25 if is_primary else 10
entry.update({"bigg_id": metabolite.id, "name": metabolite.name,
"label_x": x + label_offset, "label_y": y + label_offset,
"node_is_primary": is_primary, "node_type": "metabolite"})
# Add appropriate information
if isinstance(node, Metabolite):
add_metabolite_info(node, is_primary=True)
elif isinstance(node[1], Metabolite):
add_metabolite_info(node[1], is_primary=False)
elif node[1] == "middle":
entry["node_type"] = "midmarker"
else:
entry["node_type"] = "multimarker"
return entry
def entry_from_reaction(graph, reaction, node_index, positions, counter):
segments = {}
json_metabolites = []
educt_node, middle_node, product_node = get_subnodes(reaction)
# Get the middle node of the reaction
middle_node_id = node_index[middle_node]
x, y = positions[middle_node]
for metabolite, stoichiometry in reaction.metabolites.items():
json_metabolites.append({"coefficient": stoichiometry, "bigg_id": metabolite.id})
for node in (metabolite, (reaction, metabolite)):
for edge in graph.edges([node]):
node1, node2 = edge
segments[counter()] = {"from_node_id": node_index[node1], "to_node_id": node_index[node2], "b1": None, "b2": None}
# Connect intermediate nodes to middle node
for intermediate_node in (educt_node, product_node):
if intermediate_node in node_index:
segments[counter()] = {"from_node_id": node_index[intermediate_node],
"to_node_id": middle_node_id, "b1": None, "b2": None}
return {"name": reaction.name or reaction.id,
"bigg_id": reaction.id,
"reversibility": reaction.lower_bound < 0. < reaction.upper_bound,
"label_x": x+10,
"label_y": y+10,
"gene_reaction_rule": reaction.gene_reaction_rule,
"genes": [dict([("bigg_id", x.id), ("name", x.name)]) for x in reaction.genes],
"metabolites": json_metabolites,
"segments": segments}
def get_escher_json(graph, positions, params):
# Generate unique numeric ids
class Counter:
def __init__(self):
self.count = -1
def __call__(self, *args, **kwargs):
self.count += 1
return str(self.count)
counter = Counter()
result = [{"map_name": "test_name",
"map_id": "1234565",
"map_description": "test",
"homepage": "https://escher.github.io",
"schema": "https://escher.github.io/escher/jsonschema/1-0-0#"}]
nodes = {}
node_index = {}
for node in graph.nodes():
index = counter()
node_index[node] = index
x, y = positions[node]
nodes[index] = entry_from_metabolite_node(node, x, y)
reactions_dict = {}
for reaction in graph.reactions:
reactions_dict[counter()] = entry_from_reaction(graph, reaction, node_index, positions, counter)
width, height = canvas_size(positions, params)
result.append({"reactions": reactions_dict, "nodes": nodes, "text_labels": {},
"canvas": {"x": 0., "y": 0., "width": width, "height": height}})
return json.dumps(result)
|
# Copyright (C) 2009,2010 Canonical
#
# Authors:
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import logging
from gi.repository import Gtk, GObject
from gettext import gettext as _
from softwarecenter.enums import SortMethods
from softwarecenter.ui.gtk3.em import StockEms
from softwarecenter.ui.gtk3.models.appstore2 import AppTreeStore
from softwarecenter.ui.gtk3.widgets.apptreeview import AppTreeView
from softwarecenter.ui.gtk3.models.appstore2 import AppPropertiesHelper
from softwarecenter.utils import ExecutionTime
LOG = logging.getLogger(__name__)
class AppView(Gtk.VBox):
__gsignals__ = {
"sort-method-changed": (GObject.SignalFlags.RUN_LAST,
None,
(GObject.TYPE_PYOBJECT, ),
),
"application-activated": (GObject.SignalFlags.RUN_LAST,
None,
(GObject.TYPE_PYOBJECT, ),
),
"application-selected": (GObject.SignalFlags.RUN_LAST,
None,
(GObject.TYPE_PYOBJECT, ),
),
}
(INSTALLED_MODE, AVAILABLE_MODE, DIFF_MODE) = range(3)
_SORT_METHOD_INDEX = (SortMethods.BY_ALPHABET,
SortMethods.BY_TOP_RATED,
SortMethods.BY_CATALOGED_TIME,
SortMethods.BY_SEARCH_RANKING,
)
# indices that relate to the above tuple
_SORT_BY_ALPHABET = 0
_SORT_BY_TOP_RATED = 1
_SORT_BY_NEWEST_FIRST = 2
_SORT_BY_SEARCH_RANKING = 3
def __init__(self, db, cache, icons, show_ratings):
Gtk.VBox.__init__(self)
#~ self.set_name("app-view")
# app properties helper
with ExecutionTime("Appview.__init__ create AppPropertiesHelper"):
self.helper = AppPropertiesHelper(db, cache, icons)
# misc internal containers
self.header_hbox = Gtk.HBox()
self.header_hbox.set_border_width(StockEms.MEDIUM)
self.pack_start(self.header_hbox, False, False, 0)
self.tree_view_scroll = Gtk.ScrolledWindow()
self.pack_start(self.tree_view_scroll, True, True, 0)
# category label
self.header_label = Gtk.Label()
self.header_label.set_use_markup(True)
self.header_hbox.pack_start(self.header_label, False, False, 0)
# sort methods combobox
# variant 1 includes sort by search relevance
self.sort_methods_combobox = self._get_sort_methods_combobox()
combo_alignment = Gtk.Alignment.new(0.5, 0.5, 1.0, 0.0)
combo_alignment.add(self.sort_methods_combobox)
self.header_hbox.pack_end(combo_alignment, False, False, 0)
# content views
self.tree_view = AppTreeView(self, db, icons,
show_ratings, store=None)
self.tree_view_scroll.add(self.tree_view)
self.appcount = None
self.vadj = 0.0
# list view sorting stuff
self._force_default_sort_method = True
self._handler = self.sort_methods_combobox.connect(
"changed",
self.on_sort_method_changed)
#~ def on_draw(self, w, cr):
#~ cr.set_source_rgb(1,1,1)
#~ cr.paint()
def _append_appcount(self, appcount, mode=AVAILABLE_MODE):
#~
#~ if mode == self.INSTALLED_MODE:
#~ text = gettext.ngettext("%(amount)s item installed",
#~ "%(amount)s items installed",
#~ appcount) % { 'amount' : appcount, }
#~ elif mode == self.DIFF_MODE:
#~ text = gettext.ngettext("%(amount)s item",
#~ "%(amount)s items",
#~ appcount) % { 'amount' : appcount, }
#~ else:
#~ text = gettext.ngettext("%(amount)s item available",
#~ "%(amount)s items available",
#~ appcount) % { 'amount' : appcount, }
#~
#~ if not self.appcount:
#~ self.appcount = Gtk.Label()
#~ self.appcount.set_alignment(0.5, 0.5)
#~ self.appcount.set_margin_top(4)
#~ self.appcount.set_margin_bottom(3)
#~ self.appcount.connect("draw", self.on_draw)
#~ self.vbox.pack_start(self.appcount, False, False, 0)
#~ self.appcount.set_text(text)
#~ self.appcount.show()
pass
def on_sort_method_changed(self, *args):
self.vadj = 0.0
self.emit("sort-method-changed", self.sort_methods_combobox)
def _get_sort_methods_combobox(self):
combo = Gtk.ComboBoxText.new()
combo.append_text(_("By Name"))
combo.append_text(_("By Top Rated"))
combo.append_text(_("By Newest First"))
combo.append_text(_("By Relevance"))
combo.set_active(self._SORT_BY_TOP_RATED)
return combo
def _get_combo_children(self):
return len(self.sort_methods_combobox.get_model())
def _use_combobox_with_sort_by_search_ranking(self):
if self._get_combo_children() == 4:
return
self.sort_methods_combobox.append_text(_("By Relevance"))
def _use_combobox_without_sort_by_search_ranking(self):
if self._get_combo_children() == 3:
return
self.sort_methods_combobox.remove(self._SORT_BY_SEARCH_RANKING)
self.set_sort_method_with_no_signal(self._SORT_BY_TOP_RATED)
def set_sort_method_with_no_signal(self, sort_method):
combo = self.sort_methods_combobox
combo.handler_block(self._handler)
combo.set_active(sort_method)
combo.handler_unblock(self._handler)
def set_allow_user_sorting(self, do_allow):
self.sort_methods_combobox.set_visible(do_allow)
def set_header_labels(self, first_line, second_line):
if second_line:
markup = '%s\n<big><b>%s</b></big>' % (first_line, second_line)
else:
markup = "<big><b>%s</b></big>" % first_line
return self.header_label.set_markup(markup)
def set_model(self, model):
self.tree_view.set_model(model)
def get_model(self):
return self.tree_view.appmodel
def display_matches(self, matches, is_search=False):
# FIXME: installedpane handles display of the trees intimately,
        # so for the time being let's just return None in the case of our
# TreeView displaying an AppTreeStore ... ;(
# ... also we don't currently support user sorting in the
# installedview, so issue is somewhat moot for the time being...
if isinstance(self.get_model(), AppTreeStore):
LOG.debug("display_matches called on AppTreeStore, ignoring")
return
model = self.get_model()
# disconnect the model from the view before running
# set_from_matches to ensure that the _cell_data_func_cb is not
# run when the placeholder items are set, otherwise the purpose
# of the "load-on-demand" is gone and it leads to bugs like
# LP: #964433
self.set_model(None)
if model:
model.set_from_matches(matches)
self.set_model(model)
adj = self.tree_view_scroll.get_vadjustment()
if adj:
adj.set_lower(self.vadj)
adj.set_value(self.vadj)
def reset_default_sort_mode(self):
""" force the appview to reset to the default sort method without
doing a refresh or sending any signals
"""
self._force_default_sort_method = True
def configure_sort_method(self, is_search=False):
""" configures the sort method UI appropriately based on current
conditions, including whether a search is in progress.
        Note that this will not change the user's current sort method;
        if that is the intention, call reset_default_sort_mode()
"""
# figure out what combobox we need
if is_search:
self._use_combobox_with_sort_by_search_ranking()
else:
self._use_combobox_without_sort_by_search_ranking()
# and what sorting
if self._force_default_sort_method:
            # always reset this, it's the job of the user of the appview
# to call reset_default_sort_mode() to reset this
self._force_default_sort_method = False
            # and now set the default sort depending on whether this is a search or not
if is_search:
self.set_sort_method_with_no_signal(
self._SORT_BY_SEARCH_RANKING)
else:
self.set_sort_method_with_no_signal(
self._SORT_BY_TOP_RATED)
def clear_model(self):
return self.tree_view.clear_model()
def get_sort_mode(self):
active_index = self.sort_methods_combobox.get_active()
return self._SORT_METHOD_INDEX[active_index]
def get_app_icon_details(self):
""" helper for unity dbus support to provide details about the
application icon as it is displayed on-screen
"""
icon_size = self._get_app_icon_size_on_screen()
(icon_x, icon_y) = self._get_app_icon_xy_position_on_screen()
return (icon_size, icon_x, icon_y)
def _get_app_icon_size_on_screen(self):
""" helper for unity dbus support to get the size of the maximum side
for the application icon as it is displayed on-screen
"""
icon_size = 32
if (self.tree_view.selected_row_renderer and
self.tree_view.selected_row_renderer.icon):
pb = self.tree_view.selected_row_renderer.icon
if pb.get_width() > pb.get_height():
icon_size = pb.get_width()
else:
icon_size = pb.get_height()
return icon_size
def _get_app_icon_xy_position_on_screen(self):
""" helper for unity dbus support to get the x,y position of
the application icon as it is displayed on-screen
"""
# find top-level parent
parent = self
while parent.get_parent():
parent = parent.get_parent()
# get top-level window position
(px, py) = parent.get_position()
# and return the coordinate values
if self.tree_view.selected_row_renderer:
return (px + self.tree_view.selected_row_renderer.icon_x_offset,
py + self.tree_view.selected_row_renderer.icon_y_offset)
else:
return (px, py)
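
# --- illustration only (not part of the original softwarecenter module) ------
# A minimal sketch of the handler_block/handler_unblock pattern that
# set_sort_method_with_no_signal() relies on: the combobox can be updated
# programmatically without emitting the "changed" signal that a user click
# would trigger.
def _demo_silent_combobox_update():
    combo = Gtk.ComboBoxText.new()
    for label in (_("By Name"), _("By Top Rated"), _("By Newest First")):
        combo.append_text(label)
    handler_id = combo.connect(
        "changed",
        lambda c: LOG.debug("sort method changed to %s", c.get_active()))
    # silent update: block the handler, change the value, unblock again
    combo.handler_block(handler_id)
    combo.set_active(1)   # the "changed" callback does not run for this call
    combo.handler_unblock(handler_id)
    return combo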
|
# coding=utf-8
import logging
import random
import string
import sys
import unittest
from time import time, sleep
import apiritif
import os
import re
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from bzt.resources.selenium_extras import get_locator, dialogs_replace
class TestLocSc(unittest.TestCase):
def setUp(self):
self.vars = {'city_select_name': 'fromPort', 'input_name_id': 'inputName'}
timeout = 3.5
self.driver = None
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
self.driver = webdriver.Chrome(
service_log_path='/somewhere/webdriver.log',
options=options)
self.driver.implicitly_wait(timeout)
apiritif.put_into_thread_store(scenario_name='loc_sc', timeout=timeout, func_mode=False, windows={},
driver=self.driver)
def _1_Conditions_test(self):
with apiritif.smart_transaction('Conditions test'):
self.driver.get('http://blazedemo.com')
dialogs_replace()
test = self.driver.execute_script('return document.getElementsByName("fromPort")[0].length > 0;')
if test:
var_loc_keys = get_locator([{'id': 'wrong_id'}, {'xpath': '/html/body/div[3]/form/div/input'}])
self.driver.find_element(
var_loc_keys[0],
var_loc_keys[1]).click()
sleep(1.0)
test = self.driver.execute_script('return document.getElementsByClassName("table")[0].rows.length > 5;')
if test:
var_loc_keys = get_locator([{'xpath': '/html/body/div[2]/table/tbody/tr[5]/td[1]/input'}])
self.driver.find_element(
var_loc_keys[0],
var_loc_keys[1]).click()
test = self.driver.execute_script(
'return document.getElementById("{}").value === \'\';'.format(self.vars['input_name_id']))
if test:
var_loc_keys = get_locator([{'id': self.vars['input_name_id']}])
self.driver.find_element(
var_loc_keys[0],
var_loc_keys[1]).clear()
self.driver.find_element(
var_loc_keys[0],
var_loc_keys[1]).send_keys('John Doe')
else:
var_loc_keys = get_locator([{'id': self.vars['input_name_id']}])
self.driver.find_element(
var_loc_keys[0],
var_loc_keys[1]).clear()
self.driver.find_element(
var_loc_keys[0],
var_loc_keys[1]).send_keys('Jack Green')
var_loc_keys = get_locator([{'xpath': '/html/body/div[2]/form/div[11]/div/input'}])
self.driver.find_element(
var_loc_keys[0],
var_loc_keys[1]).click()
sleep(5.0)
else:
test = self.driver.execute_script('return document.getElementsByClassName("table")[0].rows.length > 5;')
if test:
var_loc_keys = get_locator([{'id': self.vars['elem2_id']}])
self.driver.find_element(
var_loc_keys[0],
var_loc_keys[1]).clear()
self.driver.find_element(
var_loc_keys[0],
var_loc_keys[1]).send_keys('my text')
test = self.driver.execute_script('return window.screen.width > 1000;')
if test:
self.driver.save_screenshot('file_1000')
else:
self.driver.save_screenshot('file')
else:
var_loc_keys = get_locator([{'xpath': '/html/body/div[3]/input'}])
self.driver.find_element(
var_loc_keys[0],
var_loc_keys[1]).click()
def test_locsc(self):
self._1_Conditions_test()
def tearDown(self):
if self.driver:
self.driver.quit()
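
# --- illustration only, not produced by the Taurus code generator -------------
# get_locator() and dialogs_replace() come from bzt.resources.selenium_extras;
# their real implementations are not shown in this file. The helper below is a
# *hypothetical* stand-in that illustrates the fallback idea the generated test
# relies on: try each candidate locator in order and return the first
# (by, selector) pair that actually matches an element.
def _first_matching_locator(driver, candidates):
    by_map = {
        'id': By.ID,
        'name': By.NAME,
        'xpath': By.XPATH,
        'css': By.CSS_SELECTOR,
    }
    for candidate in candidates:
        (loc_type, selector), = candidate.items()
        by = by_map[loc_type]
        if driver.find_elements(by, selector):
            return by, selector
    raise NoSuchElementException("none of the locators matched: %s" % candidates)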
|
import data_io
import pandas as pd
def index_rename(idx):
return "r" + str(idx)
def reverse(X, y, info, direction):
y = y.copy()
X = X.copy()
idx = y[y['Target']==-direction]
#print len(idx)
y = y.ix[idx.index]
y['Target'] = direction
X = X.ix[idx.index]
info = info.ix[idx.index]
# reverse columns
X.columns = ['B', 'A']
X = X[['A', 'B']]
info.columns = ['B type', 'A type']
info = info[['A type', 'B type']]
X = X.rename(index = index_rename)
y = y.rename(index = index_rename)
info = info.rename(index = index_rename)
print "Data to be appended", X.shape,y.shape, direction
return X,y,info
def exploit_symmetries(X, y, info):
reversed_values = [reverse(X,y,info,0), reverse(X,y,info,1), reverse(X,y,info,-1)]
for data in reversed_values:
X = X.append(data[0])
y = y.append(data[1])
info = info.append(data[2])
return X,y, info
def main():
X = data_io.read_train_pairs()
y = data_io.read_train_target()
info = data_io.read_train_info()
X,y, info = exploit_symmetries(X,y, info)
print X.shape, y.shape
print "-1", len(y[y['Target']==-1])
print "0", len(y[y['Target']==0])
print "1", len(y[y['Target']==1])
# X = X.iloc[:10]
# y = y.iloc[:10]
# info = info.iloc[:10]
data_io.save_train_data(X, "./Competition/CEfinal_train_pairs-sym.csv")
data_io.save(y, "./Competition/CEfinal_train_target-sym.csv")
data_io.save(info, "./Competition/CEfinal_train_publicinfo-sym.csv")
print "finished"
if __name__=="__main__":
main()
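
# --- illustration only --------------------------------------------------------
# A tiny, self-contained sketch of the symmetry trick used by reverse() and
# exploit_symmetries() above: swapping columns A and B of a cause-effect pair
# and negating the target yields an extra, equally valid training sample.
# pandas.concat is used here as the modern equivalent of the DataFrame.append
# calls above; the sample values are made up.
def _demo_symmetry():
    X = pd.DataFrame({'A': ['1 2 3'], 'B': ['4 5 6']}, index=['pair1'])
    y = pd.DataFrame({'Target': [1]}, index=['pair1'])         # 1 means A -> B
    X_rev = X.rename(columns={'A': 'B', 'B': 'A'})[['A', 'B']]  # swap the pair
    y_rev = -y                                                  # B -> A becomes -1
    X_rev = X_rev.rename(index=index_rename)                    # prefix index with "r"
    y_rev = y_rev.rename(index=index_rename)
    return pd.concat([X, X_rev]), pd.concat([y, y_rev])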
|
"""Classes and functions used by multiple modules in the system."""
import uuid
from hashlib import md5
import bcrypt
from voluptuous import Invalid, MultipleInvalid
def token():
"""
Generate a random but insecure token.
Returns:
The randomly generated token
"""
return str(uuid.uuid4().hex)
def hash(string):
"""
Hash a string.
Args:
string: string to be hashed.
Returns:
The hex digest of the string.
"""
return md5(string.encode("utf-8")).hexdigest()
class PicoException(Exception):
"""
General class for exceptions in the picoCTF API.
Allows specification of a message and response code to display to the
client, as well as an optional field for arbitrary data.
The 'data' field will not be displayed to clients but will be stored
in the database, making it ideal for storing stack traces, etc.
"""
def __init__(self, message, status_code=500, data=None):
"""Initialize a new PicoException."""
Exception.__init__(self)
self.message = message
self.status_code = status_code
self.data = data
def to_dict(self):
"""Convert a PicoException to a dict for serialization."""
rv = dict()
rv["message"] = self.message
return rv
def check(*callback_tuples):
"""
Voluptuous wrapper function to raise our PicoException.
Args:
callback_tuples: a callback_tuple should contain
                         (msg, callbacks)
Returns:
Returns a function callback for the Schema
"""
def v(value):
"""
Try to validate the value with the given callbacks.
Args:
value: the item to validate
Raises:
PicoException with 400 status code and error msg.
Returns:
The value if the validation callbacks are satisfied.
"""
for msg, callbacks in callback_tuples:
for callback in callbacks:
try:
result = callback(value)
if not result and type(result) == bool:
raise Invalid()
except Exception:
raise PicoException(msg, 400)
return value
return v
def validate(schema, data):
"""
Wrap the call to voluptuous schema to raise the proper exception.
Args:
schema: The voluptuous Schema object
data: The validation data for the schema object
Raises:
PicoException with 400 status code and the voluptuous error message
"""
try:
schema(data)
except MultipleInvalid as error:
raise PicoException(error.msg, 400)
def hash_password(password):
"""
Hash plaintext password.
Args:
password: plaintext password
Returns:
Secure hash of password.
"""
return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(8))
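
# --- illustration only --------------------------------------------------------
# A minimal sketch (not part of the real API modules) of how check() and
# validate() are meant to be combined: check() wraps plain predicate callbacks
# so that a failing callback raises PicoException(msg, 400), and validate()
# converts ordinary voluptuous errors into PicoExceptions as well. The field
# names and limits below are made up.
def _demo_validation():
    from voluptuous import Required, Schema

    user_schema = Schema({
        Required("username"): check(
            ("Usernames must be 3-20 characters.",
             [lambda name: 3 <= len(name) <= 20])),
        Required("email"): check(
            ("Email addresses must contain '@'.",
             [lambda addr: "@" in addr])),
    })
    validate(user_schema, {"username": "alice", "email": "alice@example.com"})
    try:
        validate(user_schema, {"username": "al", "email": "alice@example.com"})
    except PicoException as e:
        return e.message, e.status_code   # -> ("Usernames must be 3-20 characters.", 400)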
|
#!/usr/bin/env python
# encoding: utf-8
# Created by Brian Cherinka on 2016-05-17 10:17:35
# Licensed under a 3-clause BSD license.
# Revision History:
# Initial Version: 2016-05-17 10:17:35 by Brian Cherinka
# Last Modified On: 2016-05-17 10:17:35 by Brian
from __future__ import division, print_function
import inspect
from astropy.io import fits
from marvin import config
from marvin.core.exceptions import MarvinError
from marvin.tools.cube import Cube
from marvin.utils.general.structs import FuzzyList
from .core import MarvinToolsClass
try:
from sdss_access.path import Path
except ImportError:
Path = None
class Plate(MarvinToolsClass, FuzzyList):
    '''A class to interface with a MaNGA Plate.
This class represents a Plate, initialised either
from a file, a database, or remotely via the Marvin API. The class
inherits from Python's list class, and is defined as a list of
Cube objects. As it inherits from list, it can do all the standard Python
list operations.
    When instantiated, a Marvin Plate will attempt to discover and load all the Cubes
associated with this plate.
Parameters:
plate (str):
The plate id of the Plate to load.
plateifu (str):
The plate-ifu of the Plate to load
filename (str):
The path of the file containing the data cube to load.
mode ({'local', 'remote', 'auto'}):
The load mode to use. See
            :doc:`Mode decision tree</mode_decision>`.
release (str):
The MPL/DR version of the data to use.
nocubes (bool):
Set this to turn off the Cube loading
Attributes:
cubeXXXX (object):
The Marvin Cube object for the given ifu, e.g. cube1901 refers to the Cube for plateifu 8485-1901
plate/plateid (int):
The plate id for this plate
cartid (str):
The cart id for this plate
designid (int):
The design id for this plate
ra (float):
The RA of the plate center
dec (float):
The declination of the plate center
dateobs (str):
The date of observation for this plate
surveymode (str):
The survey mode for this plate
isbright (bool):
True if this is a bright time plate
Return:
plate:
An object representing the Plate entity. The object is a list of
Cube objects, one for each IFU cube in the Plate entity.
Example:
>>> from marvin.tools.plate import Plate
>>> plate = Plate(plate=8485)
>>> print(plate)
>>> <Marvin Plate (plate=8485, n_cubes=17, mode='local', data_origin='db')>
>>>
>>> print('Cubes found in this plate: {0}'.format(len(plate)))
>>> Cubes found in this plate: 4
>>>
>>> # access the plate via index to access the individual cubes
>>> plate[0]
>>> <Marvin Cube (plateifu='8485-12701', mode='local', data_origin='db')>
>>>
>>> # or by name
>>> plate['12702']
>>> <Marvin Cube (plateifu='8485-12702', mode='local', data_origin='db')>
>>>
'''
def __init__(self, input=None, filename=None, mangaid=None, plateifu=None,
mode=None, data=None, release=None, plate=None,
download=None, nocubes=None):
self._cubes = None
self._plate = None
self._pdict = None
self.platedir = None
self.nocubes = nocubes
# If plateid specified, force a temp plateifu
if plate:
self.plateid = plate
plateifu = '{0}-XXXX'.format(self.plateid)
self.plateifu = plateifu
args = [plate, plateifu]
assert any(args), 'Enter plate or plateifu!'
MarvinToolsClass.__init__(self, input=input, filename=filename, mangaid=mangaid,
plateifu=plateifu, mode=mode, data=data, release=release,
download=download)
# sort out any plateid, plate-ifu, mangaid name snafus
self._sortOutNames()
# grab the plate info
if self.data_origin == 'file':
self._getPlateFromFile()
elif self.data_origin == 'db':
self._getPlateFromDB()
elif self.data_origin == 'api':
self._getPlateFromAPI()
# load the plate params and init the Marvin Cubes
self._setParams()
if not self.nocubes:
self._initCubes()
def __repr__(self):
'''Representation for Plate.'''
return ('<Marvin Plate (plate={self.plateid!r}, n_cubes={0}, mode={self.mode!r}, '
'data_origin={self.data_origin!r})>'.format(len(self), self=self))
def __dir__(self):
''' Overriding dir for Plate '''
# get the attributes from the class itself
class_members = list(list(zip(*inspect.getmembers(self.__class__)))[0])
instance_attr = list(self.__dict__.keys())
# get the dir from FuzzyList
listattr = ['cube{0}'.format(i.plateifu.split('-')[1]) for i in self]
listattr.sort()
return listattr + sorted(class_members + instance_attr)
def __getattr__(self, value):
if 'cube' in value:
ifu = value.split('cube')[-1]
plateifu = '{0}-{1}'.format(self.plate, ifu)
return self[plateifu]
return super(Plate, self).__getattribute__(value)
def _getFullPath(self, **kwargs):
"""Returns the full path of the file in the tree."""
self.filename = super(Plate, self)._getFullPath('mangaplate', drpver=self._drpver,
plate=self.plateid, **kwargs)
self.platedir = self.filename
self._checkFilename()
return self.filename
def _getPlateFromFile(self):
''' Initialize a Plate from a Cube/RSS File'''
# Load file
try:
self._hdr = fits.getheader(self.filename, 1)
self.plateid = int(self._hdr['PLATEID'])
except Exception as e:
raise MarvinError('Could not initialize via filename: {0}'
.format(e))
else:
self.data_origin = 'file'
self._makePdict()
def _getPlateFromDB(self):
''' Initialize a Plate from the DB '''
import sqlalchemy
from marvin import marvindb as mdb
if not mdb.isdbconnected:
raise MarvinError('No db connected')
# Grab any cube for this plate
cube = None
try:
cube = mdb.session.query(mdb.datadb.Cube).join(
mdb.datadb.PipelineInfo, mdb.datadb.PipelineVersion).\
filter(mdb.datadb.Cube.plate == self.plateid,
mdb.datadb.PipelineVersion.version == self._drpver).first()
except sqlalchemy.orm.exc.NoResultFound as ee:
raise MarvinError('Could not retrieve Cube for plate {0}: '
'No Results Found: {1}'
.format(self.plateid, ee))
except Exception as ee:
raise MarvinError('Could not retrieve Cube for plate {0}: '
'Unknown exception: {1}'
.format(self.plateid, ee))
else:
# no cube
if not cube:
raise MarvinError('No cube found in db for plate {0}, drpver {1}'
.format(self.plateid, self._drpver))
# cube but no plateclass
try:
self._plate = cube.plateclass
except AttributeError as ee:
raise MarvinError('AttributeError: cube has no plateclass for plate {0}: {1}'
.format(self.plateid, ee))
else:
self._hdr = self._plate._hdr
self._pdict = self._plate.__dict__
self.data_origin = 'db'
if not self._plate:
raise MarvinError('Could not retrieve Plate for id {0}'.format(self.plateid))
def _getPlateFromAPI(self):
''' Initialize a Plate using the API '''
# Checks that the Plate exists.
routeparams = {'plateid': self.plateid}
url = config.urlmap['api']['getPlate']['url'].format(**routeparams)
# Make the API call
response = self._toolInteraction(url)
data = response.getData()
self._hdr = data['header']
self.data_origin = 'api'
self._makePdict()
def _initCubes(self):
''' Initialize a list of Marvin Cube objects '''
_cubes = [None]
if self.data_origin == 'file':
sdss_path = Path(release=self.release)
if self.dir3d == 'stack':
cubes = sdss_path.expand('mangacube', drpver=self._drpver,
plate=self.plateid, ifu='*', wave='LOG')
else:
cubes = sdss_path.expand('mangamastar', drpver=self._drpver,
plate=self.plateid, ifu='*', wave='LOG')
_cubes = [Cube(filename=cube, mode=self.mode, release=self.release) for cube in cubes]
elif self.data_origin == 'db':
_cubes = [Cube(plateifu=cube.plateifu, mode=self.mode, release=self.release)
for cube in self._plate.cubes]
elif self.data_origin == 'api':
routeparams = {'plateid': self.plateid}
url = config.urlmap['api']['getPlateCubes']['url'].format(**routeparams)
# Make the API call
response = self._toolInteraction(url)
data = response.getData()
plateifus = data['plateifus']
_cubes = [Cube(plateifu=pifu, mode=self.mode, release=self.release) for pifu in plateifus]
FuzzyList.__init__(self, _cubes)
self.mapper = (lambda e: e.plateifu)
def _setParams(self):
''' Set the plate parameters '''
self.ra = self._pdict.get('ra', None)
self.dec = self._pdict.get('dec', None)
self.designid = self._pdict.get('designid', None)
self.cartid = self._pdict.get('cartid', None)
self.dateobs = self._pdict.get('dateobs', None)
self.platetype = self._pdict.get('platetype', None)
self.surveymode = self._pdict.get('surveymode', None)
self.isbright = self._pdict.get('isbright', None)
self.dir3d = self._pdict.get('dir3d', None)
self.plateid = int(self.plateid)
def _makePdict(self):
''' Make the necessary plate dictionary '''
self._pdict = {}
self._pdict['ra'] = self._hdr.get('CENRA', None)
self._pdict['dec'] = self._hdr.get('CENDEC', None)
self._pdict['designid'] = self._hdr.get('DESIGNID', None)
self._pdict['cartid'] = self._hdr.get('CARTID', None)
self._pdict['dateobs'] = self._hdr.get('DATE-OBS', None)
self._pdict['platetype'] = self._hdr.get('PLATETYP', None)
self._pdict['surveymode'] = self._hdr.get('SRVYMODE', None)
self._pdict['isbright'] = 'APOGEE' in self._pdict['surveymode']
self._pdict['dir3d'] = 'mastar' if self._pdict['isbright'] else 'stack'
self._pdict['ra'] = float(self._pdict['ra'])
self._pdict['dec'] = float(self._pdict['dec'])
self._pdict['designid'] = float(self._pdict['designid'])
def _sortOutNames(self):
''' Sort out any name issues with plateid, plateifu, mangaid inputs '''
if self.plateifu and 'XXX' not in self.plateifu:
plate, ifu = self.plateifu.split('-')
self.plateid = int(plate)
def _checkFilename(self):
''' Checks the filename for a proper FITS file '''
# if filename is not FITS, then try to load one
if 'fits' not in self.filename.lower():
if not Path:
raise MarvinError('sdss_access is not installed')
else:
# is_public = 'DR' in self._release
# path_release = self._release.lower() if is_public else None
sdss_path = Path(release=self._release)
# try a cube
full = sdss_path.full('mangacube', drpver=self._drpver, plate=self.plateid, ifu='*', wave='LOG')
cubeexists = sdss_path.any('', full=full)
if cubeexists:
file = sdss_path.one('', full=full)
else:
# try an rss
full = sdss_path.full('mangarss', drpver=self._drpver, plate=self.plateid, ifu='*', wave='LOG')
rssexists = sdss_path.any('', full=full)
if rssexists:
file = sdss_path.one('', full=full)
else:
file = None
# load the file
if file:
self.filename = file
else:
self.filename = None
|
#!/usr/bin/env python
import sys
import numpy as np
import gnsstools.glonass.p as p
import gnsstools.nco as nco
import gnsstools.io as io
import gnsstools.discriminator as discriminator
class tracking_state:
def __init__(self,fs,code_p,code_f,code_i,carrier_p,carrier_f,carrier_i,mode):
self.fs = fs
self.code_p = code_p
self.code_f = code_f
self.code_i = code_i
self.carrier_p = carrier_p
self.carrier_f = carrier_f
self.carrier_i = carrier_i
self.mode = mode
self.prompt1 = 0 + 0*(1j)
self.carrier_e1 = 0
self.code_e1 = 0
self.eml = 0
# tracking loops
def track(x,s):
n = len(x)
fs = s.fs
nco.mix(x,-s.carrier_f/fs, s.carrier_p)
s.carrier_p = s.carrier_p - n*s.carrier_f/fs
s.carrier_p = np.mod(s.carrier_p,1)
cf = (s.code_f+s.carrier_f/243.84)/fs
p_early = p.correlate(x, 0, s.code_p-0.5, cf, p.p_code())
p_prompt = p.correlate(x, 0, s.code_p, cf, p.p_code())
p_late = p.correlate(x, 0, s.code_p+0.5, cf, p.p_code())
if s.mode=='FLL_WIDE':
fll_k = 2.0
a = p_prompt
b = s.prompt1
e = discriminator.fll_atan(a,b)
s.carrier_f = s.carrier_f + fll_k*e
s.prompt1 = p_prompt
elif s.mode=='FLL_NARROW':
fll_k = 0.5
a = p_prompt
b = s.prompt1
e = discriminator.fll_atan(a,b)
s.carrier_f = s.carrier_f + fll_k*e
s.prompt1 = p_prompt
elif s.mode=='PLL':
pll_k1 = 0.1
pll_k2 = 5
e = discriminator.pll_costas(p_prompt)
e1 = s.carrier_e1
s.carrier_f = s.carrier_f + pll_k1*e + pll_k2*(e-e1)
s.carrier_e1 = e
# code loop
dll_k1 = 0.00002
dll_k2 = 0.2
s.early = np.absolute(p_early)
s.prompt = np.absolute(p_prompt)
s.late = np.absolute(p_late)
if (s.late+s.early)==0:
e = 0
else:
e = (s.late-s.early)/(s.late+s.early)
s.eml = e
e1 = s.code_e1
s.code_f = s.code_f + dll_k1*e + dll_k2*(e-e1)
s.code_e1 = e
s.code_p = s.code_p + n*cf
s.code_p = np.mod(s.code_p,p.code_length)
return p_prompt,s
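
# --- illustration only ---------------------------------------------------------
# A small sketch of the normalized early-minus-late code discriminator used in
# track() above: the error is positive when the late correlator is stronger
# than the early one, zero when they balance, and its sign steers the DLL
# adjustment of code_f. The magnitudes below are made up.
def _demo_eml_discriminator():
    early, late = 80.0, 120.0              # fabricated correlator magnitudes
    e = (late - early) / (late + early)    # same normalization as in track()
    return e                               # -> 0.2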
#
# main program
#
# parse command-line arguments
# example:
# ./track-glonass-l2-p.py /dev/stdin 68873142.857 6283428.571 1 385.0 1841430.6
filename = sys.argv[1] # input data, raw file, i/q interleaved, 8 bit signed (two's complement)
fs = float(sys.argv[2]) # sampling rate, Hz
coffset = float(sys.argv[3]) # offset to L1 carrier (1602.000 MHz), Hz (positive or negative)
chan = int(sys.argv[4]) # GLONASS channel number, -7..6
doppler = float(sys.argv[5]) # initial doppler estimate from acquisition
code_offset = float(sys.argv[6]) # initial code offset from acquisition
fp = open(filename,"rb")
n = int(fs*1.000*((p.code_length-code_offset)/p.code_length)) # align with 1000 ms code boundary
x = io.get_samples_complex(fp,n)
code_offset += n*1.0*p.code_length/fs
s = tracking_state(fs=fs, # initialize tracking state
code_p=code_offset, code_f=p.chip_rate, code_i=0,
carrier_p=0, carrier_f=doppler, carrier_i=0,
# mode='FLL_NARROW')
mode='PLL')
block = 0
coffset_phase = 0.0
while True:
if s.code_p<p.code_length/2:
n = int(fs*1.000*(p.code_length-s.code_p)/p.code_length)
else:
n = int(fs*1.000*(2*p.code_length-s.code_p)/p.code_length)
x = io.get_samples_complex(fp,n)
    if x is None:
break
fm = -(coffset+437500*chan)/fs
nco.mix(x,fm,coffset_phase)
coffset_phase = coffset_phase + n*fm
coffset_phase = np.mod(coffset_phase,1)
for j in range(1000):
a,b = int(j*n/1000),int((j+1)*n/1000)
p_prompt,s = track(x[a:b],s)
print block, np.real(p_prompt), np.imag(p_prompt), s.carrier_f, s.code_f-p.chip_rate, (180/np.pi)*np.angle(p_prompt), s.early, s.prompt, s.late
block = block + 1
# if (block%100)==0:
# sys.stderr.write("%d\n"%block)
# if block==1000:
# s.mode = 'FLL_NARROW'
# if block==1000:
# s.mode = 'PLL'
|
''' Exercises with exceptions.
Exercise 1:
With the function def raise_if_not_length_four(value): check the length of
the value argument and raise an exception (specifically a ValueError) if
the length is not 4.
Exercise 2:
With the function def raise_if_not_four_characters(value): check the length of
the value argument and raise an exception (specifically a ValueError) if
the length is not 4 AND check the value is a string, if not raise, well what
would be suitable?
Exercise 3:
You need to write a function that takes an imaginary network connection (that
might be good or bad). You have some third party code that provides a database
connection with a class DataBase. You can read data from this class but
you must always call close() before your function exits.
The problem is that if the network is bad the DataBase will raise, so how do
you make sure that you always call close()?
The first part of the problem is to trap the exception.
The second part of the problem is to allow the exception to propagate so that
the caller has to deal with it but you still have to call close().
Created on 3 Jan 2017
@author: paulross
'''
import pytest
#==== Exercise 1:
def raise_if_not_length_four(value):
# Your code goes here
return value
def test_raise_if_not_length_four():
assert raise_if_not_length_four('ABCD') == 'ABCD'
assert raise_if_not_length_four(['', '', '', '']) == ['', '', '', '']
def test_raise_if_not_length_four_raises_ValueError():
with pytest.raises(ValueError) as err:
raise_if_not_length_four('')
assert err.value.args[0] == 'Argument must be length 4, not 0'
#==== Exercise 2:
def raise_if_not_four_characters(value):
# Your code goes here
return value
def test_raise_if_not_four_characters():
assert raise_if_not_four_characters('ABCD') == 'ABCD'
def test_raise_if_not_four_characters_raises_ValueError():
with pytest.raises(ValueError) as err:
raise_if_not_four_characters('')
assert err.value.args[0] == 'Argument must be length 4, not 0'
def test_raise_if_not_length_four_raises_TypeError():
with pytest.raises(TypeError) as err:
raise_if_not_four_characters(['', '', '', ''])
assert err.value.args[0] == "Argument must be a string, not <type 'list'>"
#=== Exercise 3:
#---- Regard this as third party library code that you can NOT change.
GOOD_NETWORK = 0
BAD_NETWORK = 1
class DataBase(object):
number_of_connections = 0
def __init__(self, network):
self.network = network
DataBase.number_of_connections += 1
def read(self):
if self.network == BAD_NETWORK:
raise IOError('Ooops')
return 'Data...'
def close(self):
DataBase.number_of_connections -= 1
def reset():
DataBase.number_of_connections = 0
#---- End of third party library code
def get_data_one(network):
# Modify this function
db = DataBase(network)
result = db.read()
db.close()
return result
def get_data_two(network):
# Modify this function
db = DataBase(network)
result = db.read()
db.close()
return result
def test_get_data_one_good_network():
reset()
assert get_data_one(GOOD_NETWORK) == 'Data...'
assert DataBase.number_of_connections == 0
def test_get_data_one_bad_network():
reset()
assert get_data_one(BAD_NETWORK) == ''
assert DataBase.number_of_connections == 0
def test_get_data_two_good_network():
reset()
assert get_data_two(GOOD_NETWORK) == 'Data...'
assert DataBase.number_of_connections == 0
def test_get_data_two_bad_network():
reset()
with pytest.raises(IOError) as err:
get_data_two(BAD_NETWORK)
assert err.value.args[0] == 'Ooops'
assert DataBase.number_of_connections == 0
def main():
return pytest.main(__file__ + ' -v')
if __name__ == '__main__':
main()
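
# --- illustration only ---------------------------------------------------------
# One possible shape of an Exercise 3 answer (kept separate from the
# get_data_one/get_data_two stubs above so the exercise itself stays blank):
# try/except/finally traps the bad-network IOError, while try/finally lets it
# propagate, and in both cases close() is guaranteed to run.
def _example_get_data_trapping(network):
    db = DataBase(network)
    try:
        return db.read()
    except IOError:
        return ''          # swallow the failure and hand back an empty result
    finally:
        db.close()         # always runs, good or bad network

def _example_get_data_propagating(network):
    db = DataBase(network)
    try:
        return db.read()
    finally:
        db.close()         # still runs even though the IOError propagates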
|
from jinja2 import Template
from urllib import urlencode
from graphitepager.level import Level
ALERT_MISSING_TEMPLATE = r"""{{level}} alert for
{{alert.get('name')}} {{record.target}}. Go to {{graph_url}}.
{% if docs_url %}Documentation: {{docs_url}}{% endif %}.
"""
HTML_ALERT_MISSING_TEMPLATE = r"""{{level}} alert for
{{alert.get('name')}} {{record.target}}.
Go to <a href="{{graph_url}}">the graph</a>.
{% if docs_url %}<a href="{{docs_url}}">Documentation</a>{% endif %}.
"""
SLACK_ALERT_MISSING_TEMPLATE = r"""{{level}} alert for
{{alert.get('name')}} {{record.target}}.
Go to the <{{graph_url}}|graph>.
{% if docs_url %}<{{docs_url}}|Documentation>{% endif %}.
"""
STDOUT_MISSING_TEMPLATE = r"""{{level}} alert for
{{alert.get('name')}} {{record.target}}. Go to {{graph_url}}.
"""
ALERT_TEMPLATE = r"""{{level}} alert for
{{alert.get('name')}} {{record.target}}. The current value is
{{current_value}} which passes the {{threshold_level|lower}}
value of {{threshold_value}}. Go to {{graph_url}}.
{% if docs_url %}Documentation: {{docs_url}}{% endif %}.
"""
HTML_ALERT_TEMPLATE = r"""{{level}} alert for
{{alert.get('name')}} {{record.target}}. The current value is
{{current_value}} which passes the {{threshold_level|lower}}
value of {{threshold_value}}.
Go to <a href="{{graph_url}}">the graph</a>.
{% if docs_url %}<a href="{{docs_url}}">Documentation</a>{% endif %}.
"""
SLACK_ALERT_TEMPLATE = r"""{{level}} alert for
{{alert.get('name')}} {{record.target}}. The current value is
{{current_value}} which passes the {{threshold_level|lower}}
value of {{threshold_value}}.
Go to the <{{graph_url}}|graph>.
{% if docs_url %}<{{docs_url}}|Documentation>{% endif %}.
"""
STDOUT_TEMPLATE = r"""{{level}} alert for
{{alert.get('name')}} {{record.target}}. The current value is
{{current_value}} which passes the {{threshold_level|lower}}
value of {{threshold_value}}.
"""
class Description(object):
def __init__(self, template, graphite_url, alert, record, level, value):
self.template = template
self.graphite_url = graphite_url
self.alert = alert
self.record = record
self.level = level
self.value = value
def __str__(self):
return self.description_for_alert(
self.template,
self.graphite_url,
self.alert,
self.record,
self.level,
self.value,
)
def stdout(self):
template = STDOUT_TEMPLATE
if self.level == Level.NO_DATA:
template = STDOUT_MISSING_TEMPLATE
return self.description_for_alert(
template,
self.graphite_url,
self.alert,
self.record,
self.level,
self.value,
)
def html(self):
template = HTML_ALERT_TEMPLATE
if self.level == Level.NO_DATA:
template = HTML_ALERT_MISSING_TEMPLATE
return self.description_for_alert(
template,
self.graphite_url,
self.alert,
self.record,
self.level,
self.value,
)
def slack(self):
template = SLACK_ALERT_TEMPLATE
if self.level == Level.NO_DATA:
template = SLACK_ALERT_MISSING_TEMPLATE
return self.description_for_alert(
template,
self.graphite_url,
self.alert,
self.record,
self.level,
self.value,
)
def description_for_alert(self,
template,
graphite_url,
alert,
record,
level,
current_value):
context = dict(locals())
context['graphite_url'] = graphite_url
if type(record) == str:
context['docs_url'] = alert.documentation_url()
else:
context['docs_url'] = alert.documentation_url(record.target)
url_params = (
('width', 586),
('height', 308),
('target', alert.get('target')),
('target', 'threshold({},"Warning")'.format(
alert.get('warning'))),
('target', 'threshold({},"Critical")'.format(
alert.get('critical'))),
('from', '-20mins'),
)
url_args = urlencode(url_params)
url = '{}/render/?{}'.format(graphite_url, url_args)
context['graph_url'] = url.replace('https', 'http')
context['threshold_value'] = alert.value_for_level(level)
if level == Level.NOMINAL:
context['threshold_level'] = 'warning'
else:
context['threshold_level'] = level
return Template(template).render(context)
def _get_description(graphite_url,
alert,
record,
alert_level,
value,
alert_template):
return Description(
alert_template,
graphite_url,
alert,
record,
alert_level,
value
)
def get_description(graphite_url,
alert,
record,
alert_level,
value):
return _get_description(graphite_url,
alert,
record,
alert_level,
value,
ALERT_TEMPLATE)
def missing_target_description(graphite_url,
alert,
record,
alert_level,
value):
return _get_description(graphite_url,
alert,
record,
alert_level,
value,
ALERT_MISSING_TEMPLATE)
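
# --- illustration only ---------------------------------------------------------
# A small sketch of how description_for_alert() builds the Graphite render URL:
# because url_params is a sequence of tuples rather than a dict, urlencode()
# keeps the repeated 'target' keys, so the rendered graph shows the metric plus
# both threshold lines. The metric name, thresholds and host are made up, and
# the try/except import lets the sketch run under Python 3 as well as the
# Python 2 `from urllib import urlencode` used by this module.
def _demo_render_url():
    try:
        from urllib.parse import urlencode as _urlencode   # Python 3
    except ImportError:
        from urllib import urlencode as _urlencode         # Python 2
    params = (
        ('width', 586),
        ('height', 308),
        ('target', 'stats.timers.api.p95'),
        ('target', 'threshold(250,"Warning")'),
        ('target', 'threshold(500,"Critical")'),
        ('from', '-20mins'),
    )
    return 'https://graphite.example.com/render/?' + _urlencode(params)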
|
#!/usr/bin/python
# encoding: utf-8
# filename: trabalhoTecnico.py
#
# scriptLattes V8
# Copyright 2005-2013: Jesús P. Mena-Chalco e Roberto M. Cesar-Jr.
# http://scriptlattes.sourceforge.net/
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation (FSF); either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation (FSF)
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from scriptLattes import *
from geradorDePaginasWeb import *
import re
class TrabalhoTecnico:
    item = None # raw data
idMembro = None
relevante = None
autores = None
titulo = None
ano = None
chave = None
def __init__(self, idMembro, partesDoItem, relevante):
        # partesDoItem[0]: number (NOT USED)
        # partesDoItem[1]: description of the book (RAW DATA)
self.idMembro = set([])
self.idMembro.add(idMembro)
self.relevante = relevante
self.item = partesDoItem[1]
        # Split the item into its constituent parts
partes = self.item.partition(" . ")
self.autores = partes[0].strip()
partes = partes[2]
aux = re.findall(u' ((?:19|20)\d\d)\\b', partes)
if len(aux)>0:
self.ano = aux[-1] #.strip().rstrip(".").rstrip(",")
partes = partes.rpartition(" ")
partes = partes[0]
else:
self.ano = ''
self.titulo = partes.strip().rstrip(".").rstrip(",")
        self.chave = self.autores # comparison key between objects
def compararCom(self, objeto):
if self.idMembro.isdisjoint(objeto.idMembro) and compararCadeias(self.titulo, objeto.titulo):
            # The member IDs are grouped together.
            # This part is important for building the collaboration GRAPH
self.idMembro.update(objeto.idMembro)
if len(self.autores)<len(objeto.autores):
self.autores = objeto.autores
if len(self.titulo)<len(objeto.titulo):
self.titulo = objeto.titulo
return self
        else: # not similar
return None
def html(self, listaDeMembros):
s = self.autores + '. <b>' + self.titulo + '</b>. '
s+= str(self.ano) + '.' if str(self.ano).isdigit() else '.'
s+= menuHTMLdeBuscaPT(self.titulo)
return s
# ------------------------------------------------------------------------ #
def __str__(self):
s = "\n[TRABALHO TECNICO] \n"
s += "+ID-MEMBRO : " + str(self.idMembro) + "\n"
s += "+RELEVANTE : " + str(self.relevante) + "\n"
s += "+AUTORES : " + self.autores.encode('utf8','replace') + "\n"
s += "+TITULO : " + self.titulo.encode('utf8','replace') + "\n"
s += "+ANO : " + str(self.ano) + "\n"
s += "+item : " + self.item.encode('utf8','replace') + "\n"
return s
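
# --- illustration only ---------------------------------------------------------
# A small sketch of the year-extraction step in TrabalhoTecnico.__init__ above:
# the regex keeps every four-digit year starting with 19 or 20 that follows a
# space, and the constructor uses the last match as the publication year. The
# sample string is made up.
def _demo_extrair_ano():
    texto = u'Relatorio tecnico sobre redes, Sao Paulo, 2011. 35 p.'
    anos = re.findall(r' ((?:19|20)\d\d)\b', texto)
    return anos[-1] if anos else ''        # -> '2011'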
|