'''
This file is the main control program. It works as follows:
Create the thread pool
Invoke the GUI drawing routine
Invoke face detection (questions remain about this feature, but test it first for thread smoothness)
When woken up, the GUI's interactive buttons start the servo/motor control routine and the timing control routine
The GUI keeps running at all times, listening for the user's wake-up action
'''
import threading
import GUI
import face_detection
def test():
    print(threading.active_count())
    print(threading.enumerate())
    print(threading.current_thread())
if __name__ == '__main__':
    screen_GUI = threading.Thread(target=GUI.Screen_Function)
    screen_GUI.start()
    face_recognition = threading.Thread(target=face_detection.Face_Detection)
    face_recognition.start()
    # Note: keep an eye on face recognition here and see how much compute it consumes
from datetime import datetime
from enum import Enum
from itertools import chain
from typing import Dict, List, Union
from dateutil import tz
Comparable = Union[str, Enum, datetime, bool]
def _format_kwargs(kwargs: Dict) -> List[str]:
return [f"{k} {v}" for k, v in kwargs.items()]
def _format_comparable(value: Comparable) -> str:
if isinstance(value, Enum):
return f"'{value.value}'"
elif isinstance(value, datetime):
return value.astimezone(tz.UTC).strftime("%Y-%m-%dT%H:%M:%SZ")
elif value is True:
return "true"
elif value is False:
return "false"
else:
return f"'{value}'"
def _generate_operator(op, args, kwargs) -> str:
if len(args) + len(kwargs) == 1:
return "".join(chain(args, _format_kwargs(kwargs)))
condition = f" {op} ".join(chain(args, _format_kwargs(kwargs)))
return f"({condition.strip()})"
def and_(*args, **kwargs) -> str:
return _generate_operator("and", args, kwargs)
def or_(*args, **kwargs) -> str:
return _generate_operator("or", args, kwargs)
def eq(value: Comparable) -> str:
return f"eq {_format_comparable(value)}"
def ne(value: Comparable) -> str:
return f"ne {_format_comparable(value)}"
def gt(value: Comparable) -> str:
return f"gt {_format_comparable(value)}"
def ge(value: Comparable) -> str:
return f"ge {_format_comparable(value)}"
def lt(value: Comparable) -> str:
return f"lt {_format_comparable(value)}"
def le(value: Comparable) -> str:
return f"le {_format_comparable(value)}"
import math
import numpy as np
def calculate_quantiles(num_quantile_groups, quantiles):
len_quant = len(quantiles)
if not (num_quantile_groups and 0 < num_quantile_groups <= len_quant):
num_quantile_groups = 4
quant_multiplier = len_quant/num_quantile_groups
# quantile is one less than group
# Goes from zero (inclusive) to number of groups (exclusive)
# +1 because 0 + 1 * multiplier = correct first quantile
# -1 because 0 index
# i.e. quantile:
# quant_multiplier = 1000 / 4 = 250
# [0 + 1] * (quant_multiplier) - 1 = 1 * 250 - 1 = 249 (first quantile)
quantiles = {
ind: quantiles[math.ceil((ind + 1) * quant_multiplier) - 1]
for ind in range(num_quantile_groups - 1)
}
return quantiles
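
# Example (hypothetical input): with 1000 precomputed quantiles split into 4
# groups, quant_multiplier = 250 and the three interior cut points land at
# indices 249, 499 and 749:
#   calculate_quantiles(4, quantiles)
#   -> {0: quantiles[249], 1: quantiles[499], 2: quantiles[749]}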
def flat_dict(od, separator='_', key=''):
"""
Function to flatten nested dictionary. Each level is collapsed and joined with the specified seperator.
:param od: dictionary or dictionary-like object
:type od: dict
:param seperator: character(s) joining successive levels
:type seperator: str
:param key: concatenated keys
:type key: str
:returns: unnested dictionary
:rtype: dict
"""
return {str(key).replace(' ','_') + separator + str(k) if key else k : v
for kk, vv in od.items()
for k, v in flat_dict(vv, separator, kk).items()
} if isinstance(od, dict) else {key:od}
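
# Usage sketch (hypothetical input): nested levels collapse into joined keys,
# with spaces in keys replaced by underscores.
#   flat_dict({'data stats': {'min': 0, 'max': 9}, 'count': 3})
#   -> {'data_stats_min': 0, 'data_stats_max': 9, 'count': 3}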
def _prepare_report(report, output_format=None):
if output_format:
output_format = output_format.lower()
if not output_format or output_format not in ['pretty', 'serializable', 'flat']:
return report
report = report.copy()
max_str_len = 50
max_array_len = 5
for key in report:
if isinstance(report[key], dict):
report[key] = _prepare_report(report[key], output_format=output_format)
elif isinstance(report[key], list) or isinstance(report[key], np.ndarray):
if output_format == "pretty":
if isinstance(report[key], list):
report[key] = np.array(report[key])
str_value = np.array2string(report[key], separator=', ')
if len(str_value) > max_str_len and len(report[key]) > max_array_len:
ind = 1
str_value = ''
while len(str_value) <= max_str_len:
str_value = \
np.array2string(report[key][:ind], separator=', ')[:-1] + \
', ... , ' + \
np.array2string(
report[key][-ind:], separator=', ')[1:]
ind += 1
report[key] = str_value
elif output_format == "serializable" and isinstance(report[key], np.ndarray):
report[key] = report[key].tolist()
elif output_format == "pretty" and isinstance(report[key], float):
report[key] = round(report[key], 4)
if output_format == 'flat':
report = flat_dict(report)
return report
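
# Usage sketch (hypothetical report): the 'flat' format collapses nesting via
# flat_dict, e.g.
#   _prepare_report({'stats': {'min': 0.1, 'max': 2.0}}, output_format='flat')
#   -> {'stats_min': 0.1, 'stats_max': 2.0}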
# -*- coding: utf-8 -*-
"""
Biglan's classification
This can be used for machine learning.
Biglan investigates three dimensions:
pure - applied
hard - soft
life - non-life
"""
author = 'Biglan'
authors = author
concepts = []
data = []
claim = []
theory = '''
'''
approach = ''
reading = '''
1. Ruth Neumann, Disciplinarity, In Malcolm Tight, Ka Ho Mok, Jeroen Huisman, Christopher C. Morphew (Eds.), The Routledge International Handbook of Higher Education, Routledge, USA, pp 487-500, 2009.
2. Yonghong Jade Xu, Faculty Turnover: Discipline-Specific Attention is Warranted, Research in Higher Education, Springer, Vol. 49, pp 40–61, Feb 2008.
3. Matthew Kwok, Disciplinary Differences in the Development of Employability Skills of Recent University Graduates in Manitoba: Some Initial Findings. Higher Education Perspectives, volume 1, issue 1, pp.60-77, 2004.
4. Biglan, A., The characteristics of subject matter in academic areas, Journal of Applied Psychology, 57, 195–203, 1973.
5. Malaney, G. D., Differentiation in graduate education, Research in Higher Education, 25(1), pp 82–96, 1986.
6. Design of Interventions for Instructional Reform in Software Development Education for Competency Enhancement: Summary of PhD Thesis
7. Assess Your Curriculum and Courses Using Harden’s Taxonomy of Curriculum Integration
8. Software Development Education: Breadth Courses for Developing Domain Competence and Systems Thinking
'''
urls = 'http://goelsan.wordpress.com/2010/07/27/biglans-classification-of-disciplines/'
further = [
'http://cpr.iub.edu/uploads/AACU2008Tables.pdf',
'Effective Educational Practices and Essential Learning Outcomes in General Education Courses: Differences by Discipline']
the_classification = [
[['Biology', 'Biochemistry', 'Genetics', 'Physiology'], {'pure':True, 'hard':True, 'life':True}],
[['Mathematics', 'Physics', 'Chemistry', 'Geology', 'Astronomy', 'Oceanography'], {'pure':True, 'hard':True, 'life':False}],
[['Psychology', 'Sociology', 'Anthropology', 'Political Science', 'Area Study'], {'pure':True, 'hard':False, 'life':True}],
    [['Linguistics', 'Literature', 'Communications', 'Creative Writing', 'Economics', 'Philosophy', 'Archaeology', 'History', 'Geography'], {'pure':True, 'hard':False, 'life':False}],
[['Agriculture', 'Psychiatry', 'Medicine', 'Pharmacy', 'Dentistry', 'Horticulture'], {'pure':False, 'hard':True, 'life':True}],
[['Civil Engineering', 'Telecommunication Engineering', 'Mechanical Engineering', 'Chemical Engineering', 'Electrical Engineering', 'Computer science'], {'pure':False, 'hard':True, 'life':False}],
[['Recreation', 'Arts', 'Education', 'Nursing', 'Conservation', 'Counseling', 'HR Management'], {'pure':False, 'hard':False, 'life':True}],
[['Finance', 'Accounting', 'Banking', 'Marketing', 'Journalism', 'Library And Archival Science', 'Law', 'Architecture', 'Interior Design', 'Crafts', 'Arts', 'Dance', 'Music'], {'pure':False, 'hard':False, 'life':False}]
]
# It is interesting that he himself does use "etc." in the table. What does 'etc' mean here?
# The notebook should contain a link to the table itself
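
# A minimal lookup sketch (the helper name `classify` is not part of the
# original data, just an illustration of how the table can be queried):
def classify(discipline):
    """Return the Biglan dimensions for a discipline, or None if unknown."""
    for disciplines, dims in the_classification:
        if discipline in disciplines:
            return dims
    return None

# classify('Physics') -> {'pure': True, 'hard': True, 'life': False}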
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for not_equal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_not_equal_tests(options):
"""Make a set of tests to do not equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64, tf.string],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the not euqal op testing graph."""
input_value1 = tf.compat.v1.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.compat.v1.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.not_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=4)
# Do not edit the class below except for the buildHeap,
# siftDown, siftUp, peek, remove, and insert methods.
# Feel free to add new properties and methods to the class.
class MinHeap:
def __init__(self, array):
# Do not edit the line below.
self.heap = self.buildHeap(array)
def buildHeap(self, array):
        # Heapify: sift down every node from the last index to the root; O(n) overall.
for index in range(len(array) - 1, -1, -1):
array = self.siftDown(array, index)
return array
def siftDown(self, array, parent_node_index):
        # Repeatedly swap the parent with its smaller child until the heap property holds.
if parent_node_index < 0 or parent_node_index > len(array) - 1:
return array
cur_idx = parent_node_index
idx_to_swap = cur_idx
while cur_idx < len(array):
ch_one_idx = self.find_child_one_index(array, cur_idx)
ch_two_idx = self.find_child_two_index(array, cur_idx)
if ch_one_idx == -1:
break
if ch_two_idx == -1:
idx_to_swap = ch_one_idx if array[ch_one_idx] < array[cur_idx] else -1
elif array[ch_one_idx] < array[ch_two_idx]:
idx_to_swap = ch_one_idx if array[ch_one_idx] < array[cur_idx] else -1
else:
idx_to_swap = ch_two_idx if array[ch_two_idx] < array[cur_idx] else -1
if idx_to_swap == -1:
break
array = self.swap(array, cur_idx, idx_to_swap)
cur_idx = idx_to_swap
return array
def siftUp(self, array, ch_idx):
        # Swap the node with its parent while it is smaller than the parent.
cur_idx = ch_idx
while cur_idx > -1:
par_idx = self.find_parent_index(array, cur_idx)
if par_idx == -1:
break
if array[cur_idx] < array[par_idx]:
array = self.swap(array, cur_idx, par_idx)
cur_idx = par_idx
else:
break
return array
def peek(self):
        # Return the minimum element (the root) without removing it.
return self.heap[0]
def remove(self):
        # Remove and return the minimum element (the root) in O(log n).
# swap first and last
self.heap[0], self.heap[len(self.heap) - 1] = self.heap[len(self.heap) - 1], self.heap[0]
# remove last
value_to_remove = self.heap.pop()
# siftdown first
self.heap = self.siftDown(self.heap, 0)
return value_to_remove
def insert(self, value):
        # Insert in O(log n): append, then sift the new element up.
# add value as the last element of array
self.heap.append(value)
# siftup the new element
self.heap = self.siftUp(self.heap, len(self.heap) - 1)
@staticmethod
def swap(array, pos_1, pos_2):
if pos_1 > len(array) - 1 or pos_1 < 0:
return array
elif pos_2 > len(array) - 1 or pos_2 < 0:
return array
else:
array[pos_1], array[pos_2] = array[pos_2], array[pos_1]
return array
@staticmethod
def find_parent_index(array, child_index):
if child_index > len(array) - 1 or child_index < 0:
return -1
elif (child_index - 1) // 2 > len(array) - 1 or (child_index - 1) // 2 < 0:
return -1
else:
return (child_index - 1) // 2
@staticmethod
def find_child_one_index(array, parent_index):
if parent_index > len(array) - 1 or parent_index < 0:
return -1
elif 2 * parent_index + 1 > len(array) - 1:
return -1
else:
return 2 * parent_index + 1
@staticmethod
def find_child_two_index(array, parent_index):
if parent_index > len(array) - 1 or parent_index < 0:
return -1
elif 2 * parent_index + 2 > len(array) - 1:
return -1
else:
return 2 * parent_index + 2
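
# Complexity: buildHeap runs in O(n) overall; siftDown and siftUp (and hence
# insert and remove) are O(log n); peek is O(1).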
# TEST
# before_heap = [48, 12, 24, 7, 8, -5, 24, 391, 24, 56, 2, 6, 8, 41]
# result_heap = [-5, 2, 6, 7, 8, 8, 24, 391, 24, 56, 12, 24, 48, 41]
# print(MinHeap.find_parent_index(before_heap, 1) == 0)
# print(MinHeap.find_parent_index(before_heap, -1000) == -1)
# print(MinHeap.find_parent_index(before_heap, 1000) == -1)
# print(MinHeap.find_child_one_index(before_heap, 1) == 3)
# print(MinHeap.find_child_one_index(before_heap, -123094) == -1)
# print(MinHeap.find_child_one_index(before_heap, 123094) == -1)
# print(MinHeap.find_child_two_index(before_heap, 5) == 12)
# print(MinHeap.find_child_two_index(before_heap, -123094) == -1)
# print(MinHeap.find_child_two_index(before_heap, 123094) == -1)
# min_heap = MinHeap(before_heap)
# print(min_heap.heap == result_heap)
# TEST X
# before_heap = [-7, 2, 3, 8, -10, 4, -6, -10, -2, -7, 10, 5, 2, 9, -9, -5, 3, 8]
# print(before_heap)
# min_heap = MinHeap(before_heap)
# print(min_heap.heap)
# print(min_heap.remove() == -10)
# print(min_heap.peek() == -10)
# print(min_heap.insert(-8))
# print(min_heap.peek() == -10)
# print(min_heap.remove() == -10)
# print(min_heap.peek() == -9)
# print(min_heap.insert(8))
# print(min_heap.peek() == -9)
# TEST 9
before_heap = [
    -823, 164, 48, -987, 323, 399, -293, 183, -908, -376, 14, 980, 965,
    842, 422, 829, 59, 724, -415, -733, 356, -855, -155, 52, 328, -544,
    -371, -160, -942, -51, 700, -363, -353, -359, 238, 892, -730, -575,
    892, 490, 490, 995, 572, 888, -935, 919, -191, 646, -120, 125, -817,
    341, -575, 372, -874, 243, 610, -36, -685, -337, -13, 295, 800, -950,
    -949, -257, 631, -542, 201, -796, 157, 950, 540, -846, -265, 746, 355,
    -578, -441, -254, -941, -738, -469, -167, -420, -126, -410, 59
]
# print(before_heap)
# min_heap = MinHeap(before_heap)
# print(min_heap.heap)
# print(min_heap.insert(2))
# print(min_heap.insert(22))
# print(min_heap.insert(222))
# print(min_heap.insert(2222))
# print(min_heap.remove() == -987)
# print(min_heap.remove() == -950)
# print(min_heap.remove() == -949)
# print(min_heap.remove() == -942)
# TEST 5
before_heap = [-7, 2, 3, 8, -10, 4, -6, -10, -2, -7, 10, 5, 2, 9, -9, -5, 3, 8]
print(before_heap)
min_heap = MinHeap(before_heap)
# print(min_heap.heap)
# print(min_heap.remove() == -10)
# print(min_heap.peek() == -10)
print(min_heap.insert(-8))
# print(min_heap.peek() == -10)
# print(min_heap.remove() == -10)
# print(min_heap.peek() == -9)
# print(min_heap.insert(8))
# print(min_heap.peek() == -9)
# print(min_heap.heap)
def isMinHeapPropertySatisfied(array):
print(array)
for currentIdx in range(1, len(array)):
parentIdx = (currentIdx - 1) // 2
# print(currentIdx, parentIdx)
if array[parentIdx] > array[currentIdx]:
return False
return True
print(isMinHeapPropertySatisfied(min_heap.heap))
from django.core.cache import cache
class ClearCacheMixin:
"""Mixin for clearing cache after every test"""
def setUp(self) -> None:
cache.clear()
def tearDown(self) -> None:
cache.clear()
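
# Usage sketch (the test class below is hypothetical):
#   class MyViewTests(ClearCacheMixin, TestCase):
#       def test_something(self):
#           ...  # cache is empty before this test and cleared again after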
"""
PrimitiveGenerator
Provides hardware, board and debug services using primitives
"""
from pyedbglib.primitive import primitives
from primitivebase import DebugInterface
from primitivebase import HardwareInterface
from primitivebase import BoardInterface
from primitivebase import ProgramExecInterface
class HardwareInterfacePrimitiveGenerator(HardwareInterface):
"""
Primitive generator for hardware services
"""
def __init__(self, target):
self.target = target
def set_clk(self):
self.target.new_element(primitives.SET_CLK_HI)
def clr_clk(self):
self.target.new_element(primitives.SET_CLK_LO)
def _pins(self, value):
self.target.new_element(primitives.SET_ICSP_PINS)
self.target.append_byte(value & 0xFF)
def get_pins(self):
self.target.new_element(primitives.GET_ICSP_PINS)
result = self.target.sync(1)
return result
def set_mclr_high(self):
self.target.new_element(primitives.SET_VPP_ON)
def set_mclr_low(self):
self.target.new_element(primitives.SET_VPP_OFF)
class BoardInterfacePrimitiveGenerator(BoardInterface):
"""
Primitive generator for board services
"""
def __init__(self, target):
self.target = target
def delay_ms(self, milli_seconds):
self.target.new_element(primitives.DELAY_MS)
self.target.append_le16(milli_seconds)
def delay_us(self, micro_seconds):
self.target.new_element(primitives.DELAY_US)
self.target.append_le16(micro_seconds)
class DebugInterfacePrimitiveGenerator(DebugInterface):
"""
Primitive generator for debug services
"""
# pylint: disable=too-few-public-methods
def __init__(self, target):
self.target = target
def debug_command(self, de_cmd, bytes_out, bytes_in):
self.target.new_element(primitives.DE_COMMAND)
self.target.append_byte(de_cmd)
self.target.append_le16(bytes_out)
self.target.append_le16(bytes_in)
class ProgramExecInterfacePrimitiveGenerator(ProgramExecInterface):
"""
Primitive generator for programming executive services
"""
def __init__(self, target):
self.target = target
def send_word(self, word):
"""
Send a 16 bit word
"""
self.target.new_element(primitives.P24_SEND_PE_WORD)
self.target.append_le16(word)
def send_word_buf(self):
"""
Send a 16 bit word from the data pipe (indirect write)
"""
self.target.new_element(primitives.P24_SEND_PE_WORD_BUF)
def receive_word(self):
"""
Receive a 16 bit word to the data pipe (indirect read)
"""
self.target.new_element(primitives.P24_RECEIVE_PE_WORD)
def handshake(self):
"""
Wait for Programming Executive to finish executing current command
"""
        self.target.new_element(primitives.P24_PE_HANDSHAKE)
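
# Usage sketch (assumes a ``target`` object exposing the new_element /
# append_byte / append_le16 / sync methods used above; primitive names come
# from pyedbglib):
#   board = BoardInterfacePrimitiveGenerator(target)
#   board.delay_ms(10)   # appends a DELAY_MS element with a 16-bit operand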
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Get a catalog of acquisition stars using the algorithm described in
https://docs.google.com/presentation/d/1VtFKAW9he2vWIQAnb6unpK4u1bVAVziIdX9TnqRS3a8
"""
import numpy as np
from scipy import ndimage, stats
from scipy.interpolate import interp1d
from chandra_aca.star_probs import acq_success_prob, prob_n_acq
from chandra_aca.transform import (pixels_to_yagzag, mag_to_count_rate,
snr_mag_for_t_ccd)
from . import characteristics as ACA
from . import characteristics_acq as ACQ
from .core import (get_mag_std, ACACatalogTable, bin2x2,
get_image_props, pea_reject_image, ACABox,
MetaAttribute, AliasAttribute, calc_spoiler_impact)
def get_acq_catalog(obsid=0, **kwargs):
"""
Get a catalog of acquisition stars using the algorithm described in
https://docs.google.com/presentation/d/1VtFKAW9he2vWIQAnb6unpK4u1bVAVziIdX9TnqRS3a8
If ``obsid`` corresponds to an already-scheduled obsid then the parameters
``att``, ``man_angle``, ``t_ccd``, ``date``, and ``dither`` will
be fetched via ``mica.starcheck`` if not explicitly provided here.
:param obsid: obsid (default=0)
:param att: attitude (any object that can initialize Quat)
:param n_acq: desired number of acquisition stars (default=8)
:param man_angle: maneuver angle (deg)
:param t_ccd: ACA CCD temperature (degC)
:param date: date of acquisition (any DateTime-compatible format)
:param dither: dither size (float or 2-element sequence (dither_y, dither_z), arcsec)
:param detector: 'ACIS-S' | 'ACIS-I' | 'HRC-S' | 'HRC-I'
:param sim_offset: SIM translation offset from nominal [steps] (default=0)
:param focus_offset: SIM focus offset [steps] (default=0)
:param stars: table of AGASC stars (will be fetched from agasc if None)
:param include_ids: list of AGASC IDs of stars to include in selected catalog
:param include_halfws: list of acq halfwidths corresponding to ``include_ids``.
For values of ``0`` proseco chooses the best halfwidth(s).
:param exclude_ids: list of AGASC IDs of stars to exclude from selected catalog
:param optimize: optimize star catalog after initial selection (default=True)
:param verbose: provide extra logging info (mostly calc_p_safe) (default=False)
:param print_log: print the run log to stdout (default=False)
:returns: AcqTable of acquisition stars
"""
# Make an empty AcqTable object, mostly for logging. It gets populated
    # after selecting an initial catalog of potential acq stars.
acqs = AcqTable()
acqs.set_attrs_from_kwargs(obsid=obsid, **kwargs)
acqs.set_stars()
# Only allow imposters that are statistical outliers and are brighter than
    # this (temperature-dependent) threshold. See characteristics.py for more
# explanation.
acqs.imposters_mag_limit = snr_mag_for_t_ccd(acqs.t_ccd,
ref_mag=ACQ.imposter_mag_lim_ref_mag,
ref_t_ccd=ACQ.imposter_mag_lim_ref_t_ccd)
acqs.log(f'getting dark cal image at date={acqs.date} t_ccd={acqs.t_ccd:.1f}')
# Probability of man_err for this observation with a given man_angle. Used
# for marginalizing probabilities over different man_errs.
acqs.p_man_errs = np.array([get_p_man_err(man_err, acqs.man_angle)
for man_err in ACQ.man_errs])
acqs.cand_acqs = acqs.get_acq_candidates(acqs.stars)
    # Fill in the entire acq['probs'].p_acqs table (which is actually a dict keyed by
# (box_size, man_err) tuples).
for acq in acqs.cand_acqs:
acq['probs'] = AcqProbs(acqs, acq, acqs.dither, acqs.stars, acqs.dark,
acqs.t_ccd, acqs.date)
acqs.get_initial_catalog()
if acqs.optimize:
acqs.optimize_catalog(acqs.verbose)
# Set p_acq column to be the marginalized probabilities
acqs.update_p_acq_column(acqs)
# Sort to make order match the original candidate list order (by
# increasing mag), and assign a slot. Sadly astropy 3.1 has a real
# performance bug here and doing the sort makes 6 deepcopy's of the
# meta, which in this case is substantial (mostly stars). So temporarily
# clear out the meta before sorting and then restore from a (light) copy.
acqs_meta_copy = acqs.meta.copy()
acqs.meta.clear()
acqs.sort('idx')
acqs.meta.update(acqs_meta_copy)
acqs['slot'] = np.arange(len(acqs), dtype=np.int64)
# Add slot to cand_acqs table, putting in -99 if not selected as acq.
# This is for convenience in downstream reporting or introspection.
slots = [acqs.get_id(acq['id'])['slot'] if acq['id'] in acqs['id'] else -99
for acq in acqs.cand_acqs]
acqs.cand_acqs['slot'] = np.array(slots, dtype=np.int64)
if len(acqs) < acqs.n_acq:
acqs.log(f'Selected only {len(acqs)} acq stars versus requested {acqs.n_acq}',
warning=True)
return acqs
class AcqTable(ACACatalogTable):
"""
Catalog of acquisition stars
"""
# Define base set of allowed keyword args to __init__. Subsequent MetaAttribute
# or AliasAttribute properties will add to this.
allowed_kwargs = ACACatalogTable.allowed_kwargs.copy()
# Catalog type when plotting (None | 'FID' | 'ACQ' | 'GUI')
catalog_type = 'ACQ'
# Elements of meta that should not be directly serialized to pickle
# (either too big or requires special handling).
pickle_exclude = ('stars', 'dark', 'bad_stars')
# Name of table. Use to define default file names where applicable.
# (e.g. `obs19387/acqs.pkl`).
name = 'acqs'
# Required attributes
required_attrs = ('att', 'man_angle', 't_ccd_acq', 'date', 'dither_acq')
t_ccd = AliasAttribute() # Maps t_ccd to t_ccd_acq base attribute
dither = AliasAttribute() # .. and likewise.
include_ids = AliasAttribute()
include_halfws = AliasAttribute()
exclude_ids = AliasAttribute()
# IDs that are included but with halfw=0 which implies to optimize halfw
# instead of freezing at the provided value. This attribute is set internally
# based on the values of include_halfws.
include_optimize_halfw_ids = MetaAttribute(is_kwarg=False, default=())
p_man_errs = MetaAttribute(is_kwarg=False)
cand_acqs = MetaAttribute(is_kwarg=False)
p_safe = MetaAttribute(is_kwarg=False)
_fid_set = MetaAttribute(is_kwarg=False, default=())
imposters_mag_limit = MetaAttribute(is_kwarg=False, default=20.0)
@classmethod
def empty(cls):
"""
Return a minimal ACACatalogTable which satisfies API requirements. For AcqTable
it should have 'id' and 'halfw' columns.
:returns: StarsTable of stars (empty)
"""
out = super().empty()
out['halfw'] = np.full(fill_value=0, shape=(0,), dtype=np.int64)
return out
@property
def fid_set(self):
if not hasattr(self, '_fid_set'):
self._fid_set = ()
return self._fid_set
@fid_set.setter
def fid_set(self, fid_ids):
# No action required if fid_set is already fid_ids
if self.fid_set == tuple(fid_ids):
return
if self.fids is None:
raise ValueError('cannot set fid_set before setting fids')
cand_fids = self.fids.cand_fids
if cand_fids is None:
raise ValueError('cannot set fid_set before selecting candidate fids')
self._fid_set = ()
cand_fids_ids = list(cand_fids['id'])
for fid_id in sorted(fid_ids):
if fid_id in cand_fids_ids:
self._fid_set += (fid_id,)
else:
self.log(f'Fid {fid_id} is not in available candidate '
f'fid ids {cand_fids_ids}, ignoring',
warning=True)
# Update marginalized p_acq and p_safe. The underlying probability
# functions know about fid_set and new values are computed on-demand.
self.update_p_acq_column(self)
self.calc_p_safe()
def make_report(self, rootdir='.'):
"""
Make summary HTML report for acq selection process and outputs.
Output is in ``<rootdir>/obs<obsid>/acq/index.html`` plus related images
in that directory.
:param rootdir: root directory for outputs
"""
from .report_acq import make_report
make_report(self, rootdir=rootdir)
def update_p_acq_column(self, acqs):
"""
Update (in-place) the marginalized acquisition probability column
'p_acq'. This is typically called after a change in catalog or
change in the fid set. The acq['probs'].p_acq_marg() method will
pick up the new fid set.
        :param acqs: acqs table (AcqTable)
"""
for acq in self:
acq['p_acq'] = acq['probs'].p_acq_marg(acq['halfw'], acqs)
def update_idxs_halfws(self, idxs, halfws):
"""
        Update the rows of self to match the specified indices
        and half widths. These two input lists must match the length
        of self and correspond to stars in self.cand_acqs.
        :param idxs: list of indices into self.cand_acqs
        :param halfws: list of search box half widths
"""
if len(idxs) != len(self) or len(halfws) != len(self):
raise ValueError('input lists must match length of acqs')
for acq, idx, halfw in zip(self, idxs, halfws):
if acq['idx'] != idx:
acq_new = self.cand_acqs[idx]
for name in self.colnames:
acq[name] = acq_new[name]
acq['halfw'] = halfw
def get_log_p_2_or_fewer(self):
"""
Return the starcheck acquisition merit function of the probability of
acquiring two or fewer stars.
:returns: log10(probability) (float)
"""
n_or_fewer_probs = prob_n_acq(self['p_acq'])[1]
if len(n_or_fewer_probs) > 2:
p_2_or_fewer = n_or_fewer_probs[2]
else:
p_2_or_fewer = 1.0
return np.log10(p_2_or_fewer)
def get_obs_info(self):
"""
Convenience method to return the parts of meta that are needed
for test_common OBS_INFO.
:returns: dict of observation information
"""
keys = ('obsid', 'att', 'date', 't_ccd_acq', 't_ccd_guide', 'man_angle',
'dither_acq', 'dither_guide',
'detector', 'sim_offset', 'focus_offset')
return {key: getattr(self, key) for key in keys}
def get_candidates_mask(self, stars):
"""Get base filter for acceptable candidates.
This does not include spatial filtering.
:param stars: StarsTable
:returns: bool mask of acceptable stars
"""
ok = ((stars['CLASS'] == 0) &
(stars['mag'] > 5.3) &
(stars['mag'] < 11.0) &
(~np.isclose(stars['COLOR1'], 0.7)) &
(stars['mag_err'] < 1.0) & # Mag err < 1.0 mag
(stars['ASPQ1'] < 40) & # Less than 2 arcsec centroid offset due to nearby spoiler
(stars['ASPQ2'] == 0) & # Proper motion less than 0.5 arcsec/yr
(stars['POS_ERR'] < 3000) & # Position error < 3.0 arcsec
((stars['VAR'] == -9999) | (stars['VAR'] == 5)) # Not known to vary > 0.2 mag
)
return ok
def get_acq_candidates(self, stars, max_candidates=20):
"""
Get candidates for acquisition stars from ``stars`` table.
This allows for candidates right up to the useful part of the CCD.
The p_acq will be accordingly penalized.
:param stars: list of stars in the field
:param max_candidates: maximum candidate acq stars
        :returns: Table of candidate acq stars
"""
ok = (self.get_candidates_mask(stars) &
(np.abs(stars['row']) < ACA.max_ccd_row) & # Max usable row
(np.abs(stars['col']) < ACA.max_ccd_col) # Max usable col
)
cand_acqs = stars[ok]
cand_acqs.sort('mag')
self.log('Filtering on CLASS, mag, COLOR1, row/col, '
'mag_err, ASPQ1/2, POS_ERR:')
self.log(f'Reduced star list from {len(stars)} to '
f'{len(cand_acqs)} candidate acq stars')
# Reject any candidate with a spoiler or bad star. Collect a list of
# good (not rejected) candidates and stop when there are
# max_candidates. Check for col spoilers only against stars that are
# bright enough and on CCD
goods = []
stars_mask = stars['mag'] < 11.5 - ACA.col_spoiler_mag_diff
for ii, acq in enumerate(cand_acqs):
if (self.in_bad_star_set(acq) or
self.has_nearby_spoiler(acq, stars) or
self.has_column_spoiler(acq, stars, stars_mask)):
continue
goods.append(ii)
if len(goods) == max_candidates:
break
cand_acqs = cand_acqs[goods]
self.log('Selected {} candidates with no spoiler (star within 3 mag and 30 arcsec)'
.format(len(cand_acqs)))
# If any include_ids (stars forced to be in catalog) ensure that the
# star is in the cand_acqs table. Need to re-sort as well.
if self.include_ids or self.include_halfws:
self.process_include_ids(cand_acqs, stars)
cand_acqs.sort('mag')
cand_acqs.rename_column('COLOR1', 'color')
# Drop all the other AGASC columns. No longer useful.
names = [name for name in cand_acqs.colnames if not name.isupper()]
cand_acqs = AcqTable(cand_acqs[names])
box_sizes_list = self.get_box_sizes(cand_acqs)
halfws = [box_sizes[0] for box_sizes in box_sizes_list]
# Make this suitable for plotting
n_cand = len(cand_acqs)
cand_acqs['idx'] = np.arange(n_cand, dtype=np.int64)
cand_acqs['type'] = np.full(n_cand, 'ACQ')
cand_acqs['halfw'] = np.array(halfws, dtype=np.int64)
# Acq prob for box_size=halfw, marginalized over man_err
cand_acqs['p_acq'] = np.full(n_cand, -999.0)
cand_acqs['probs'] = np.full(n_cand, None) # Filled in with AcqProb objects
cand_acqs['spoilers'] = np.full(n_cand, None) # Filled in with Table of spoilers
cand_acqs['imposters'] = np.full(n_cand, None) # Filled in with Table of imposters
# Cached value of box_size + man_err for spoilers
cand_acqs['spoilers_box'] = np.full(n_cand, None)
# Cached value of box_size + dither for imposters
cand_acqs['imposters_box'] = np.full(n_cand, None)
cand_acqs['box_sizes'] = box_sizes_list
return cand_acqs
def get_box_sizes(self, cand_acqs):
"""Get the available box sizes for each cand_acq as all those with size <= the
largest man_error with non-zero probability. E.g. in the 5-20 deg man
angle bin the 80-100" row is 0.1 and the 100-120" row is 0.0. So this
        will limit the box sizes to 60, 80, and 100.
An exception to the box size limit is for bright stars. For stars
brighter than 8.0 mag (referenced to t_ccd=-10), the box size is
allowed to go up to at least 100 arcsec. For stars brighter than 9.0
mag it can go up to at least 80 arcsec. At these bright mags the
larger search boxes have no impact on acquisition probability.
This is particularly relevant to man_angle < 5 deg, where the max
maneuver error is 60 arcsec. In this case, bright stars can still have
80 or 100 arcsec boxes. In the case of a creep-away observation where
the initial bias might be bad, this gives a bit more margin.
:param cand_acqs: AcqTable of candidate acq stars
:return: list of box-size arrays corresponding to cand_acqs table
"""
box_sizes_list = []
max_man_err = np.max(ACQ.man_errs[self.p_man_errs > 0])
# Get the effective equivalent of 8.0 and 9.0 mag for the current t_ccd
mag_8 = snr_mag_for_t_ccd(self.t_ccd, ref_mag=8.0, ref_t_ccd=-10.0)
mag_9 = snr_mag_for_t_ccd(self.t_ccd, ref_mag=9.0, ref_t_ccd=-10.0)
for cand_acq in cand_acqs:
mag = cand_acq['mag']
if mag < mag_8:
max_box_size = max(max_man_err, 100)
elif mag < mag_9:
max_box_size = max(max_man_err, 80)
else:
max_box_size = max_man_err
box_sizes = ACQ.box_sizes[ACQ.box_sizes <= max_box_size]
box_sizes_list.append(box_sizes)
return box_sizes_list
def in_bad_star_set(self, acq):
"""
Returns True if ``acq`` is in the bad star set.
:param acq: AcqTable Row
:returns: bool
"""
if acq['id'] in ACA.bad_star_set:
self.log(f'Rejecting star {acq["id"]} which is in bad star list', id=acq['id'])
idx = self.stars.get_id_idx(acq['id'])
self.bad_stars_mask[idx] = True
return True
else:
return False
def has_nearby_spoiler(self, acq, stars):
"""
Returns True if ``acq`` has a nearby star that could spoil acquisition.
:param acq: AcqTable Row
:param stars: StarsTable
:returns: bool
"""
if acq['ASPQ1'] == 0:
return False
dy, dz, frac_norm = calc_spoiler_impact(acq, stars)
if np.abs(dy) > 1.5 or np.abs(dz) > 1.5 or frac_norm < 0.95:
self.log(f'Candidate acq star {acq["id"]} rejected due to nearby spoiler(s) '
f'dy={dy:.1f} dz={dz:.1f} frac_norm={frac_norm:.2f}',
id=acq['id'])
return True
else:
return False
def process_include_ids(self, cand_acqs, stars):
"""Ensure that the cand_acqs table has stars that were forced to be included.
Also do validation of include_ids and include_halfws.
:param cand_acqs: candidate acquisition stars table
:param stars: stars table
"""
# Allow for not providing halfws, in which case proseco chooses.
if self.include_halfws is None or len(self.include_halfws) == 0:
self.include_halfws = [0] * len(self.include_ids)
if len(self.include_ids) != len(self.include_halfws):
raise ValueError('include_ids and include_halfws must have same length')
# Ensure values are valid box_sizes
grid_func = interp1d(ACQ.box_sizes, ACQ.box_sizes,
kind='nearest', fill_value='extrapolate')
self.include_optimize_halfw_ids = [
acq_id for acq_id, halfw in zip(self.include_ids, self.include_halfws)
if halfw == 0]
self.include_halfws = grid_func(self.include_halfws).tolist()
super().process_include_ids(cand_acqs, stars)
def select_best_p_acqs(self, cand_acqs, min_p_acq, acq_indices, box_sizes):
"""
Find stars with the highest acquisition probability according to the
algorithm below. ``p_acqs`` is the same-named column from candidate
acq stars and it contains a dict keyed by (box_size, man_err). This
algorithm uses the assumption of man_err=box_size.
- Loop over box sizes in descending order (160, ..., 60)
- Sort in descending order the p_acqs corresponding to that box size
(where largest p_acqs come first)
- Loop over the list and add any stars with p_acq > min_p_acq to the
list of accepted stars.
        - If the list is ``n_acq`` long (complete catalog) then stop
This function can be called multiple times with successively smaller
min_p_acq to fill out the catalog. The acq_indices and box_sizes
arrays are appended in place in this process.
:param cand_acqs: AcqTable of candidate acquisition stars
:param min_p_acq: minimum p_acq to include in this round (float)
:param acq_indices: list of indices into cand_acqs of selected stars
:param box_sizes: list of box sizes of selected stars
"""
self.log(f'Find stars with best acq prob for min_p_acq={min_p_acq}')
self.log(f'Current catalog: acq_indices={acq_indices} box_sizes={box_sizes}')
for box_size in ACQ.box_sizes:
# Get array of marginalized (over man_err) p_acq values corresponding
# to box_size for each of the candidate acq stars. For acq's where
# the current box_size is not in the available list then set the
# probability to zero. This happens for small maneuver angles where
# acq.box_sizes might be only [60] or [60, 80].
p_acqs_for_box = np.zeros(len(cand_acqs))
my_box_sizes = cand_acqs['box_sizes']
my_probs = cand_acqs['probs']
for idx in range(len(cand_acqs)):
if box_size in my_box_sizes[idx]:
p_acqs_for_box[idx] = my_probs[idx].p_acq_marg(box_size, self)
self.log(f'Trying search box size {box_size} arcsec', level=1)
if np.all(p_acqs_for_box < min_p_acq):
self.log(f'No acceptable candidates (probably small man angle)', level=2)
continue
indices = np.argsort(-p_acqs_for_box, kind='mergesort')
for acq_idx in indices:
if acq_idx in acq_indices:
continue
acq = cand_acqs[acq_idx]
# Don't consider any stars in the exclude list
if acq['id'] in self.exclude_ids:
continue
p_acq = p_acqs_for_box[acq_idx]
accepted = p_acq > min_p_acq
status = 'ACCEPTED' if accepted else 'rejected'
self.log(f'Star idx={acq_idx:2d} id={acq["id"]:10d} '
f'box={box_size:3d} mag={acq["mag"]:5.1f} p_acq={p_acq:.3f} '
f'{status}',
id=acq['id'],
level=2)
if accepted:
acq_indices.append(acq_idx)
box_sizes.append(box_size)
if len(acq_indices) == self.n_acq:
self.log(f'Found {self.n_acq} acq stars, done')
return
def get_initial_catalog(self):
"""
Get the initial catalog of up to ``n_acq`` candidate acquisition stars. This
updates the current AcqTable (self) in place to add selected stars.
TO DO: these should all just be taken from self
:param cand_acqs: AcqTable of candidate acquisition stars
:param stars: StarsTable of stars in or near the ACA FOV
:param dark: dark current image (ndarray, e-/sec)
:param dither: dither (float, arcsec)
:param t_ccd: CCD temperature (float, degC)
:param date: observation date
"""
cand_acqs = self.cand_acqs
self.log(f'Getting initial catalog from {len(cand_acqs)} candidates')
# Build up the initial catalog as a list of indices into cand_acqs
# and the corresponding initial box size (halfw).
acq_indices = []
box_sizes = []
# Start with force-include stars, if any.
if self.include_ids:
self.log(f'Processing force-include ids={self.include_ids} '
f'halfws={self.include_halfws}')
# Re-order candidate acqs to put those in the include list first
ok = np.in1d(cand_acqs['id'], self.include_ids)
idxs = np.concatenate([np.where(ok)[0], np.where(~ok)[0]])
cand_acqs = cand_acqs[idxs]
n_include = len(self.include_ids)
for min_p_acq in (0.75, 0.5, 0.25, 0.05, -1):
if len(acq_indices) < n_include:
# Select candidates meeting min_p_acq, and update
# acq_indices, box_sizes in place
self.select_best_p_acqs(cand_acqs[:n_include], min_p_acq,
acq_indices, box_sizes)
# This should never happen but be careful
if len(acq_indices) != n_include:
            raise RuntimeError('failure in force-include')
        # For include stars where the halfw is not going to be optimized
        # then override the box size that was just found with the
# user-supplied value.
for include_id, include_halfw in zip(self.include_ids, self.include_halfws):
if include_id not in self.include_optimize_halfw_ids:
# Find the position in box_sizes that corresponds to include_id
# and set to the specified include_halfw.
for idx in range(len(acq_indices)):
if include_id == cand_acqs[acq_indices[idx]]['id']:
box_sizes[idx] = include_halfw
break
# Now accumulate indices and box sizes of candidate acq stars that meet
# successively less stringent minimum p_acq.
for min_p_acq in (0.75, 0.5, 0.25, 0.05):
if len(acq_indices) < self.n_acq:
# Updates acq_indices, box_sizes in place
self.select_best_p_acqs(cand_acqs, min_p_acq, acq_indices, box_sizes)
if len(acq_indices) == self.n_acq:
break
# Make all the not-accepted candidate acqs have halfw=120 as a reasonable
# default and then set the accepted acqs to the best box_size. Then set
# p_acq to the marginalized acquisition probability.
cand_acqs['halfw'] = np.minimum(120, cand_acqs['halfw'])
cand_acqs['halfw'][acq_indices] = box_sizes
cand_acqs.update_p_acq_column(self)
# Finally select the initial catalog
acqs_init = cand_acqs[acq_indices]
# Transfer to acqs (which at this point is an empty table)
for col in acqs_init.itercols():
self[col.info.name] = col
def calc_p_brightest(self, acq, box_size, man_err=0, bgd=0):
"""
Calculate the probability that the `acq` star is the brightest
candidate in the search box.
This caches the spoiler and imposter stars in the acqs table (the row
corresponding to ``acq``). It is required that the first time this is
called that the box_size and man_err be the maximum, and this is checked.
:param acq: acq stars (AcqTable Row)
:param box_size: box size (float, arcsec)
:param man_err: maneuver error (float, arcsec, default=0)
        :param bgd: assumed background for imposters (float, e-/sec, default=0)
:returns: probability that acq is the brightest (float)
"""
stars = self.stars
dark = self.dark
dither = self.dither
# Spoilers
ext_box_size = box_size + man_err
kwargs = dict(stars=stars, acq=acq, box_size=ext_box_size)
spoilers = get_intruders(acq, ext_box_size, 'spoilers',
n_sigma=2.0, # TO DO: put to characteristics
get_func=get_spoiler_stars, kwargs=kwargs)
# Imposters
ext_box_size = box_size + dither
kwargs = dict(star_row=acq['row'], star_col=acq['col'],
maxmag=acq['mag'] + acq['mag_err'],
box_size=ext_box_size,
dark=dark,
bgd=bgd, # TO DO deal with this
mag_limit=self.imposters_mag_limit
)
imposters = get_intruders(acq, ext_box_size, 'imposters',
n_sigma=1.0, # TO DO: put to characteristics
get_func=get_imposter_stars, kwargs=kwargs)
mags = np.concatenate([spoilers['mag'], imposters['mag']])
mag_errs = np.concatenate([spoilers['mag_err'], imposters['mag_err']])
prob = calc_p_brightest_compare(acq, mags, mag_errs)
return prob
def calc_p_safe(self, verbose=False):
"""
Calculate the probability of a safing action resulting from failure
to acquire at least two (2) acquisition stars.
This uses the probability of 2 or fewer stars => "conservative" p_fail at this
man_err. This uses 2 stars instead of 1 or fewer (which is the actual criteria
for a safing action). This allows for one star to be dropped for reasons not
reflected in the acq model probability and makes the optimization dig a bit deeper
in to the catalog beyond the brightest stars.
:returns: p_safe (float)
"""
p_no_safe = 1.0
self_halfws = self['halfw']
self_probs = self['probs']
for man_err, p_man_err in zip(ACQ.man_errs, self.p_man_errs):
if p_man_err == 0.0:
continue
p_acqs = [prob.p_acqs(halfw, man_err, self)
for halfw, prob in zip(self_halfws, self_probs)]
p_n_cum = prob_n_acq(p_acqs)[1] # This returns (p_n, p_n_cum)
# Probability of 2 or fewer stars => conservative fail criteria
p2 = p_n_cum[2]
if verbose:
self.log(f'man_err = {man_err}, p_man_err = {p_man_err}')
self.log('p_acqs =' + ' '.join(['{:.3f}'.format(val) for val in p_acqs]))
self.log('log10(p 2_or_fewer) = {:.2f}'.format(np.log10(p2)))
p_no_safe *= (1 - p_man_err * p2)
p_safe = 1 - p_no_safe
self.p_safe = p_safe
return p_safe
def optimize_acq_halfw(self, idx, p_safe, verbose=False):
"""
Optimize the box size (halfw) for the acq star ``idx`` in the current acqs
table. Assume current ``p_safe``.
:param idx: acq star index
:param p_safe: current value of p_safe
:param verbose: include extra information in the run log
:returns improved, p_safe: whether p_safe was improved and the new value
"""
acq = self[idx]
orig_halfw = acq['halfw']
orig_p_acq = acq['probs'].p_acq_marg(acq['halfw'], self)
self.log(f'Optimizing halfw for idx={idx} id={acq["id"]}', id=acq['id'])
# Compute p_safe for each possible halfw for the current star
p_safes = []
box_sizes = acq['box_sizes']
for box_size in box_sizes:
new_p_acq = acq['probs'].p_acq_marg(box_size, self)
# Do not reduce marginalized p_acq to below 0.1. It can happen that p_safe
# goes down very slightly with an increase in box size from the original,
# and then the box size gets stuck there because of the deadband for later
# reducing box size.
if new_p_acq < 0.1 and new_p_acq < orig_p_acq:
self.log(f'Skipping halfw {box_size}: new marg p_acq < 0.1 and new < orig'
f' ({new_p_acq:.3f} < {orig_p_acq:.3f})')
p_safes.append(p_safe)
else:
acq['halfw'] = box_size
p_safes.append(self.calc_p_safe(verbose))
# Find best p_safe
min_idx = np.argmin(p_safes)
min_p_safe = p_safes[min_idx]
min_halfw = box_sizes[min_idx]
# If p_safe went down, then consider this an improvement if either:
# - acq halfw is increased (bigger boxes are better)
# - p_safe went down by at least 10%
# So avoid reducing box sizes for only small improvements in p_safe.
improved = ((min_p_safe < p_safe) and
((min_halfw > orig_halfw) or (min_p_safe / p_safe < 0.9)))
p_safes_strs = [f'{np.log10(p):.2f} ({box_size}")'
for p, box_size in zip(p_safes, box_sizes)]
self.log('p_safes={}'.format(', '.join(p_safes_strs)), level=1, id=acq['id'])
self.log('min_p_safe={:.2f} p_safe={:.2f} min_halfw={} orig_halfw={} improved={}'
.format(np.log10(min_p_safe), np.log10(p_safe),
min_halfw, orig_halfw, improved),
level=1, id=acq['id'])
if improved:
self.log(f'Update acq idx={idx} halfw from {orig_halfw} to {min_halfw}',
level=1, id=acq['id'])
p_safe = min_p_safe
acq['halfw'] = min_halfw
else:
acq['halfw'] = orig_halfw
return p_safe, improved
def optimize_acqs_halfw(self, verbose=False):
"""
Optimize the box_size (halfw) for the acq stars in the current catalog.
This cycles through each star and optimizes the box size for that star
using the ``optimize_acq_halfw()`` method.
:param verbose: include additional information in the run log
"""
p_safe = self.calc_p_safe()
idxs = self['p_acq'].argsort()
# Any updates made?
any_improved = False
for idx in idxs:
# Don't optimize halfw for a star that is specified for inclusion
# with a valid (non-zero) halfw set. The set of include_optimize_halfw_ids is
# any ids where halfw=0 was provided.
if self['id'][idx] in set(self.include_ids) - set(self.include_optimize_halfw_ids):
continue
p_safe, improved = self.optimize_acq_halfw(idx, p_safe, verbose)
any_improved |= improved
return p_safe, any_improved
def optimize_catalog(self, verbose=False):
"""
Optimize the current acquisition catalog.
:param verbose: include additional information in the run log
"""
# If every acq star is specified as included, then no optimization
if all(acq['id'] in self.include_ids for acq in self):
return
p_safe = self.calc_p_safe(verbose=True)
self.log('initial log10(p_safe)={:.2f}'.format(np.log10(p_safe)))
# Start by optimizing the half-widths of the initial catalog
for _ in range(5):
p_safe, improved = self.optimize_acqs_halfw(verbose)
if not improved:
break
self.log(f'After optimizing initial catalog p_safe = {p_safe:.5f}')
# Now try to swap in a new star from the candidate list and see if
# it can improve p_safe. Skips candidates already in the catalog
# or specifically excluded.
skip_acq_ids = set(self['id']) | set(self.exclude_ids)
for cand_acq in self.cand_acqs:
cand_id = cand_acq['id']
if cand_id in skip_acq_ids:
continue
# Get the index of the worst p_acq in the catalog, excluding acq stars
# that are in include_ids (since they are not to be replaced).
ok = [acq['id'] not in self.include_ids for acq in self]
# acqs = self[ok]
acqs_probs_ok = self['probs'][ok]
acqs_halfw_ok = self['halfw'][ok]
acqs_id_ok = self['id'][ok]
# Sort by the marginalized acq probability for the current box size
p_acqs = [acq_probs.p_acq_marg(acq_halfw, self)
for acq_probs, acq_halfw in zip(acqs_probs_ok, acqs_halfw_ok)]
# TODO: performance?
idx_worst = np.argsort(p_acqs, kind='mergesort')[0]
idx = self.get_id_idx(acqs_id_ok[idx_worst])
self.log('Trying to use {} mag={:.2f} to replace idx={} with p_acq={:.3f}'
.format(cand_id, cand_acq['mag'], idx, p_acqs[idx_worst]), id=cand_id)
# Make a copy of the row (acq star) as a numpy void (structured array row)
orig_acq = self[idx].as_void()
# Stub in the new candidate and get the best halfw (and corresponding new p_safe)
self[idx] = cand_acq
new_p_safe, improved = self.optimize_acq_halfw(idx, p_safe, verbose)
# If the new star is noticably better (regardless of box size), OR
# comparable but with a bigger box, then accept it and do one round of
# full catalog box-size optimization.
improved = ((new_p_safe / p_safe < 0.9) or
(new_p_safe < p_safe and self['halfw'][idx] > orig_acq['halfw']))
if improved:
p_safe, improved = self.optimize_acqs_halfw(verbose)
self.calc_p_safe(verbose=True)
self.log(f' accepted, new p_safe = {p_safe:.5f}', id=cand_id)
else:
self[idx] = orig_acq
def get_spoiler_stars(stars, acq, box_size):
"""
Get acq spoiler stars, i.e. any star in the specified box_size (which
would normally be an extended box including man_err).
OBC adjusts search box position based on the difference between estimated
and target attitude (which is the basis for yang/zang in catalog). Dither
is included in the adjustment, so the only remaining term is the
maneuver error, which is included via the ``man_err`` box extension.
Imagine a 500 arcsec dither pattern. OBC adjusts search box for that,
so apart from actual man err the box will be centered on the acq star.
See this ref for information on how well the catalog mag errors correlate
with observed. Answer: not exactly, but probably good enough. Plots all
the way at the bottom are key::
http://nbviewer.jupyter.org/url/cxc.harvard.edu/mta/ASPECT/
ipynb/ssawg/2018x03x21/star-mag-uncertainties.ipynb
TO DO: consider mag uncertainties at the faint end related to
background subtraction and warm pixel corruption of background.
:param stars: StarsTable of stars for this field
:param acq: acquisition star (AcqTable Row)
:param box_size: box size (float, arcsec)
:returns: numpy structured array of spoiler stars
"""
stars = stars.as_array()
# 1-sigma of difference of stars['mag'] - acq['mag']
# TO DO: lower limit clip?
mag_diff_err = np.sqrt(stars['mag_err'] ** 2 + acq['mag_err'] ** 2)
# Stars in extended box and within 3-sigma (99.7%)
ok = ((np.abs(stars['yang'] - acq['yang']) < box_size) &
(np.abs(stars['zang'] - acq['zang']) < box_size) &
(stars['mag'] - acq['mag'] < 3 * mag_diff_err) &
(stars['id'] != acq['id'])
)
spoilers = stars[ok]
spoilers.sort(order=['mag'])
return spoilers
def get_imposter_stars(dark, star_row, star_col, thresh=None,
maxmag=11.5, box_size=120, bgd=40, mag_limit=20.0, test=False):
"""
Note: current alg purposely avoids using the actual flight background
calculation because this is unstable to small fluctuations in values
and often over-estimates background. Using this can easily miss a
search hit that the flight ACA will detect. So just use a mean
dark current ``bgd``.
:param dark: dark current image (ndarray, e-/sec)
:param star_row: row of acq star (float)
:param star_col: col of acq star (float)
:param thresh: PEA search hit threshold for a 2x2 block (e-/sec)
:param maxmag: Max mag (alternate way to specify search hit ``thresh``)
:param box_size: box size (arcsec)
:param bgd: assumed flat background (float, e-/sec)
:param mag_limit: Max mag for imposter (using 6x6 readout)
:param test: hook for convenience in algorithm testing
:returns: numpy structured array of imposter stars
"""
# Convert row/col to array index coords unless testing.
rc_off = 0 if test else 512
acq_row = int(star_row + rc_off)
acq_col = int(star_col + rc_off)
box_row = int(box_size.row)
box_col = int(box_size.col)
# Make sure box is within CCD
box_r0 = np.clip(acq_row - box_row, 0, 1024)
box_r1 = np.clip(acq_row + box_row, 0, 1024)
box_c0 = np.clip(acq_col - box_col, 0, 1024)
box_c1 = np.clip(acq_col + box_col, 0, 1024)
# Make sure box has even number of pixels on each edge. Increase
# box by one if needed.
#
# TO DO: Test the clipping and shrinking code
#
if (box_r1 - box_r0) % 2 == 1:
if box_r1 == 1024:
box_r0 -= 1
else:
box_r1 += 1
if (box_c1 - box_c0) % 2 == 1:
if box_c1 == 1024:
box_c0 -= 1
else:
box_c1 += 1
# Get bgd-subtracted dark current image corresponding to the search box
# and bin in 2x2 blocks.
dc2x2 = bin2x2(dark[box_r0:box_r1, box_c0:box_c1]) - bgd * 4
if test:
print(dc2x2)
# PEA search hit threshold for a 2x2 block based on count_rate(MAXMAG) / 4
if thresh is None:
thresh = mag_to_count_rate(maxmag) / 4 # e-/sec
# Get an image ``dc_labeled`` which same shape as ``dc2x2`` but has
# contiguous regions above ``thresh`` labeled with a unique index.
# This is a one-line way of doing the PEA merging process, roughly.
dc_labeled, n_hits = ndimage.label(dc2x2 > thresh)
if test:
print(dc_labeled)
# If no hits just return empty list
if n_hits == 0:
return []
outs = []
for idx in range(n_hits):
# Get row and col index vals for each merged region of search hits
rows, cols = np.where(dc_labeled == idx + 1)
vals = dc2x2[rows, cols]
# Centroid row, col in 2x2 binned coords. Since we are using edge-based
        # coordinates, we need to add 0.5 pixels to coords for FM centroid calc.
# A single pixel at coord (0, 0) has FM centroid (0.5, 0.5).
rows = rows + 0.5
cols = cols + 0.5
vals_sum = np.sum(vals)
r2x2 = np.sum(rows * vals) / vals_sum
c2x2 = np.sum(cols * vals) / vals_sum
# Integer centroid row/col (center of readout image 8x8 box)
c_row = int(np.round(box_r0 + 2 * r2x2))
c_col = int(np.round(box_c0 + 2 * c2x2))
# Reject if too close to CCD edge
if (c_row < 4 or c_row > dark.shape[0] - 4 or
c_col < 4 or c_col > dark.shape[1] - 4):
continue
img, img_sum, mag, row, col = get_image_props(dark, c_row, c_col, bgd)
if mag > mag_limit:
continue
if pea_reject_image(img):
continue
# Revert to ACA coordinates (row,col => -512:512) unless testing, where
# it is more convenient to just use normal array index coords.
if not test:
row -= 512
col -= 512
c_row -= 512
c_col -= 512
yang, zang = pixels_to_yagzag(row, col, allow_bad=True)
out = (row,
col,
row - star_row,
col - star_col,
yang,
zang,
c_row - 4,
c_col - 4,
img,
img_sum,
mag,
get_mag_std(mag).item(),
)
outs.append(out)
if len(outs) > 0:
dtype = [('row', '<f8'), ('col', '<f8'), ('d_row', '<f8'), ('d_col', '<f8'),
('yang', '<f8'), ('zang', '<f8'), ('row0', '<i8'), ('col0', '<i8'),
('img', 'f8', (8, 8)), ('img_sum', '<f8'), ('mag', '<f8'), ('mag_err', '<f8')]
outs = np.rec.fromrecords(outs, dtype=dtype)
outs.sort(order=['mag'])
return outs
def calc_p_brightest_compare(acq, mags, mag_errs):
"""
For given ``acq`` star and intruders mag, mag_err,
do the probability calculation to see if the acq star is brighter
than all of them.
:param acq: acquisition star (AcqTable Row)
:param mags: iterable of mags
:param mag_errs: iterable of mag errors
:returns: probability that acq stars is brighter than all mags
"""
if len(mags) == 0:
return 1.0
n_pts = 100
x0, x1 = stats.norm.ppf([0.001, 0.999], loc=acq['mag'], scale=acq['mag_err'])
x = np.linspace(x0, x1, n_pts)
dx = (x1 - x0) / (n_pts - 1)
acq_pdf = stats.norm.pdf(x, loc=acq['mag'], scale=acq['mag_err'])
sp_cdfs = []
for mag, mag_err in zip(mags, mag_errs):
# Compute prob intruder is fainter than acq (so sp_mag > x).
# CDF is prob that sp_mag < x, so take 1-CDF.
sp_cdf = stats.norm.cdf(x, loc=mag, scale=mag_err)
sp_cdfs.append(1 - sp_cdf)
prod_sp_cdf = np.prod(sp_cdfs, axis=0).clip(1e-30)
    # Do the integral: prob = ∫ dθ pdf_acq(θ) * ∏_m P(mag_m > θ)
prob = np.sum(acq_pdf * prod_sp_cdf * dx)
return prob
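
# Sanity check (hypothetical values): a single intruder with the same mag and
# mag_err as the acq star gives prob ~ 0.5 by symmetry; with no intruders the
# function short-circuits to 1.0.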
def get_intruders(acq, box_size, name, n_sigma, get_func, kwargs):
"""
Get intruders table for name='spoilers' or 'imposters' from ``acq``.
If not already in acq then call ``get_func(**kwargs)`` to get it.
:param acq: acq stars (AcqTable Row)
:param box_size: box size (float, arcsec)
:param name: intruder name ('spoilers' | 'imposters')
:param n_sigma: sigma threshold for comparisons
:param get_func: function to actually get spoilers or imposters
:param kwargs: kwargs to pass to get_func()
:returns: dict with keys yang, zang, mag, mag_err.
"""
name_box = name + '_box'
intruders = acq[name]
box_size = ACABox(box_size)
if intruders is None:
intruders = get_func(**kwargs)
acq[name_box] = box_size
if len(intruders) > 0:
# Clip to within n_sigma. d_mag < 0 for intruder brighter than acq
d_mag = intruders['mag'] - acq['mag']
d_mag_err = np.sqrt(intruders['mag_err'] ** 2 + acq['mag_err'] ** 2)
ok = d_mag < n_sigma * d_mag_err
intruders = intruders[ok]
acq[name] = intruders
else:
# Ensure cached spoilers cover the current case.
if box_size > acq[name_box]:
raise ValueError(f'box_size is greater than {name_box}')
colnames = ['yang', 'zang', 'mag', 'mag_err']
if len(intruders) == 0:
intruders = {name: np.array([], dtype=np.float64) for name in colnames}
else:
ok = ((np.abs(intruders['yang'] - acq['yang']) < box_size.y) &
(np.abs(intruders['zang'] - acq['zang']) < box_size.z))
intruders = {name: intruders[name][ok] for name in ['mag', 'mag_err']}
return intruders
def calc_p_on_ccd(row, col, box_size):
"""
Calculate the probability that star and initial tracked readout box
are fully within the usable part of the CCD.
Note that ``box_size`` here is not a search box size, it is normally
``man_err + dither`` and reflects the size of the box where the star can
land on the CCD. This is independent of the search box size, but does
assume that man_err < search box size. This is always valid because
    this function only gets called in that case (otherwise p_acq is just
    set to 0.0 in calc_p_safe). Dither does not enter into the
``man_err < search box size`` relation because the OBC accounts for
dither when setting the search box position.
This uses a simplistic calculation which assumes that ``p_on_ccd`` is
just the fraction of box area that is within the effective usable portion
of the CCD.
:param row: row coordinate of star (float)
:param col: col coordinate of star (float)
:param box_size: box size (ACABox)
:returns: probability the star is on usable part of CCD (float)
"""
p_on_ccd = 1.0
# Require that the readout box when candidate acq star is evaluated
# by the PEA (via a normal 8x8 readout) is fully on the CCD usable area.
# Do so by reducing the effective CCD usable area by the readout
# halfwidth (noting that there is a leading row before 8x8).
max_ccd_row = ACA.max_ccd_row - 5
max_ccd_col = ACA.max_ccd_col - 4
for rc, max_rc, half_width in ((row, max_ccd_row, box_size.row),
(col, max_ccd_col, box_size.col)):
# Pixel boundaries are symmetric so just take abs(row/col)
rc1 = abs(rc) + half_width
full_width = half_width * 2
pix_off_ccd = rc1 - max_rc
if pix_off_ccd > 0:
# Reduce p_on_ccd by fraction of pixels inside usable area.
pix_inside = full_width - pix_off_ccd
if pix_inside > 0:
p_on_ccd *= pix_inside / full_width
else:
p_on_ccd = 0.0
return p_on_ccd
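# Worked 1-D example of the fraction logic above (hypothetical numbers): if
# max_ccd_row works out to 506 and the star sits at row 500 with a box
# half-width of 20 pixels, then rc1 = 520, pix_off_ccd = 14, pix_inside =
# 40 - 14 = 26, and p_on_ccd is reduced by a factor of 26 / 40 = 0.65.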
class AcqProbs:
def __init__(self, acqs, acq, dither, stars, dark, t_ccd, date):
"""Calculate probabilities related to acquisition, in particular an element
in the ``p_acqs`` matrix which specifies star acquisition probability
for given search box size and maneuver error.
This sets these attributes:
- ``p_brightest``: probability this star is the brightest in box (function
of ``box_size`` and ``man_err``)
- ``p_acq_model``: probability of acquisition from the chandra_aca model
(function of ``box_size``)
- ``p_on_ccd``: probability star is on the usable part of the CCD (function
of ``man_err`` and ``dither``)
- ``p_acqs``: product of the above three
Since chandra_aca 4.24 with the grid-floor model, the acquisition
probability model value is multiplied here by 0.985 in order to help
the optimization algorithm converge to a good solution. The grid-floor
model has accurate p_fail values for bright stars, in the range of
0.005, but for catalogs with some bright stars this ends up skewing the
p_safe calculation to where the box size of fainter stars does not make
enough impact to get optimized.
:param acqs: acqs table (AcqTable)
:param acq: acq star (AcqTable Row) in the candidate acqs table
:param dither: dither (float, arcsec)
:param stars: stars table
:param dark: dark current map
:param t_ccd: CCD temperature (float, degC)
:param date: observation date
"""
self._p_brightest = {}
self._p_acq_model = {}
self._p_on_ccd = {}
self._p_acqs = {}
self._p_acq_marg = {}
self._p_fid_spoiler = {}
self._p_fid_id_spoiler = {}
# Convert table row to plain dict for persistence
self.acq = {key: acq[key] for key in ('yang', 'zang')}
for box_size in ACQ.box_sizes:
# Need to iterate over man_errs in reverse order because calc_p_brightest
# caches interlopers based on first call, so that needs to have the largest
# box sizes.
for man_err in ACQ.man_errs[::-1]:
if man_err > box_size:
p_brightest = self._p_brightest[box_size, man_err] = 0.0
else:
# Prob of being brightest in box (function of box_size and
# man_err, independently because imposter prob is just a
# function of box_size not man_err). Technically also a
# function of dither, but that does not vary here.
p_brightest = acqs.calc_p_brightest(acq, box_size=box_size,
man_err=man_err)
self._p_brightest[box_size, man_err] = p_brightest
# Acquisition probability model value (function of box_size only)
for box_size in ACQ.box_sizes:
p_acq_model = acq_success_prob(date=date, t_ccd=t_ccd,
mag=acq['mag'], color=acq['color'],
spoiler=False, halfwidth=box_size)
self._p_acq_model[box_size] = p_acq_model * 0.985
# Probability star is in acq box (function of man_err and dither only)
for man_err in ACQ.man_errs:
p_on_ccd = calc_p_on_ccd(acq['row'], acq['col'], box_size=man_err + dither)
self._p_on_ccd[man_err] = p_on_ccd
def p_on_ccd(self, man_err):
return self._p_on_ccd[man_err]
def p_brightest(self, box_size, man_err, acqs):
assert acqs.cand_acqs is not None
return self._p_brightest[box_size, man_err]
def p_acq_model(self, box_size):
return self._p_acq_model[box_size]
def p_acqs(self, box_size, man_err, acqs):
assert acqs.cand_acqs is not None
fid_set = acqs.fid_set
try:
return self._p_acqs[box_size, man_err, fid_set]
except KeyError:
p_acq = (self.p_brightest(box_size, man_err, acqs) *
self.p_acq_model(box_size) *
self.p_on_ccd(man_err) *
self.p_fid_spoiler(box_size, acqs))
self._p_acqs[box_size, man_err, fid_set] = p_acq
return p_acq
def p_acq_marg(self, box_size, acqs):
assert acqs.cand_acqs is not None
fid_set = acqs.fid_set
try:
return self._p_acq_marg[box_size, fid_set]
except KeyError:
p_acq_marg = 0.0
for man_err, p_man_err in zip(ACQ.man_errs, acqs.p_man_errs):
p_acq_marg += self.p_acqs(box_size, man_err, acqs) * p_man_err
self._p_acq_marg[box_size, fid_set] = p_acq_marg
return p_acq_marg
def p_fid_spoiler(self, box_size, acqs):
"""
Return the probability multiplier based on any fid in the current fid set spoiling
this acq star (within ``box_size``). The current fid set is a property of the
``fids`` table. The output value will be 1.0 for no spoilers and 0.0 for one or
        more spoilers (normally there can be at most one fid spoiler).
This caches the values in a dict for subsequent access.
        :param box_size: search box size in arcsec
        :param acqs: acqs table (AcqTable)
        :returns: probability multiplier (0 or 1)
"""
assert acqs.cand_acqs is not None
fid_set = acqs.fid_set
try:
return self._p_fid_spoiler[box_size, fid_set]
except KeyError:
p_fid_spoiler = 1.0
            # If there are fids then multiply the individual fid spoiler probs
for fid_id in fid_set:
p_fid_spoiler *= self.p_fid_id_spoiler(box_size, fid_id, acqs)
self._p_fid_spoiler[box_size, fid_set] = p_fid_spoiler
return p_fid_spoiler
def p_fid_id_spoiler(self, box_size, fid_id, acqs):
"""
Return the probability multiplier for fid ``fid_id`` spoiling this acq star (within
``box_size``). The output value will be 0.0 if this fid spoils this acq, otherwise
set to 1.0 (no impact).
This caches the values in a dict for subsequent access.
        :param box_size: search box size in arcsec
        :param fid_id: fid identifier
        :param acqs: acqs table (AcqTable)
        :returns: probability multiplier (0 or 1)
"""
assert acqs.cand_acqs is not None
try:
return self._p_fid_id_spoiler[box_size, fid_id]
except KeyError:
fids = acqs.fids
if fids is None:
acqs.add_warning('Requested fid spoiler probability without '
'setting acqs.fids first')
return 1.0
p_fid_id_spoiler = 1.0
try:
fid = fids.cand_fids.get_id(fid_id)
except (KeyError, IndexError, AssertionError):
# This should not happen, but ignore with a warning in any case. Non-candidate
# fid cannot spoil an acq star.
acqs.add_warning(f'Requested fid spoiler probability for fid '
f'{acqs.detector}-{fid_id} but it is '
f'not a candidate')
else:
if fids.spoils(fid, self.acq, box_size):
p_fid_id_spoiler = 0.0
self._p_fid_id_spoiler[box_size, fid_id] = p_fid_id_spoiler
return p_fid_id_spoiler
def get_p_man_err(man_err, man_angle):
"""
Probability for given ``man_err`` given maneuver angle ``man_angle``.
:param man_err: maneuver error (float, arcsec)
:param man_angle: maneuver angle (float, deg)
:returns: probability of man_error for given man_angle
"""
pmea = ACQ.p_man_errs_angles # [0, 5, 20, 40, 60, 80, 100, 120, 180]
pme = ACQ.p_man_errs
man_angle_idx = np.searchsorted(pmea, man_angle) if (man_angle > 0) else 1
name = '{}-{}'.format(pmea[man_angle_idx - 1], pmea[man_angle_idx])
man_err_idx = np.searchsorted(pme['man_err_hi'], man_err)
if man_err_idx == len(pme):
        raise ValueError(f'man_err must be <= {pme["man_err_hi"][-1]}')
return pme[name][man_err_idx]
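# Sketch of the binning above with hypothetical values (not the real ACQ
# tables): with angle bin edges [0, 5, 20, 40, ...] a man_angle of 12 deg
# selects the '5-20' column via np.searchsorted, and man_err picks the first
# row whose 'man_err_hi' upper bound is >= man_err; a man_err beyond the last
# bound raises ValueError.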
| python | 56,895 |
# Generated by Django 2.2.12 on 2020-05-25 08:02
import json
from hashlib import blake2b
from django.db import migrations, models
def get_hash(credentials):
keys = {
'url',
'counter_version',
'requestor_id',
'customer_id',
'http_username',
'http_password',
'api_key',
'extra_params',
}
data = {key: getattr(credentials, key) for key in keys}
dump = json.dumps(data, ensure_ascii=False, sort_keys=True)
return blake2b(dump.encode('utf-8'), digest_size=16).hexdigest()
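# Minimal sketch (hypothetical credentials object): get_hash only reads the
# eight attributes listed above, so any object exposing them works, e.g.:
#   from types import SimpleNamespace
#   creds = SimpleNamespace(url='https://example.com', counter_version='5',
#                           requestor_id='r', customer_id='c',
#                           http_username='', http_password='',
#                           api_key='', extra_params='')
#   get_hash(creds)  # 32 hex chars, since blake2b digest_size=16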
def fill_version_hash(apps, schema_editor):
"""
    The historical model available here does not carry the usual model
    methods, so we use a local implementation of the hash computation.
"""
SushiCredentials = apps.get_model('sushi', 'SushiCredentials')
for credentials in SushiCredentials.objects.all():
credentials.version_hash = get_hash(credentials)
credentials.save()
def noop(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('sushi', '0028_sushifetchattempt_credentials_version_hash'),
]
operations = [
migrations.AddField(
model_name='sushicredentials',
name='version_hash',
field=models.CharField(
default='', help_text='Current hash of model attributes', max_length=32
),
preserve_default=False,
),
migrations.RunPython(fill_version_hash, noop),
]
| python | 1,494 |
#!/usr/bin/python3
import json
from json import JSONDecodeError
import argparse
import urllib.request
from colorama import Fore
from prettytable import PrettyTable
statistics = "/var/log/dystopia/statistics.json"
key_file = "/var/log/dystopia/ipstack.key"
def print_message(message):
print(Fore.GREEN + "[*] " + Fore.WHITE + message)
def print_error(message):
print(Fore.RED + "[-] " + Fore.WHITE + message)
def print_warning(message):
print(Fore.YELLOW + "[!] " + Fore.WHITE + message)
def read_json_file(filename):
if filename is None:
        print_error("no filename given!")
exit()
try:
with open(filename, "r") as outfile:
data = json.load(outfile)
return data
except JSONDecodeError as e:
        print_error(
            "file: " + filename + " might be corrupted! JSONDecodeError: " + str(e)
        )
exit()
except FileNotFoundError:
print_error("file: '{}' was not found.".format(filename))
exit()
def write_to_file(filename, data):
try:
with open(filename, "a+") as f:
f.write(data)
except FileNotFoundError:
print_error("file: '{}' was not found.".format(filename))
exit()
def get_access_key():
try:
with open(key_file, "r") as f:
content = f.readlines()
return content[0]
except FileNotFoundError:
return None
def get_geo_data(address):
    key = get_access_key()
    if key is None or len(key.strip()) == 0:
        return None
    key = key.strip()
url = "http://api.ipstack.com/"
url = url + address.strip() + "?access_key=" + key
try:
        with urllib.request.urlopen(url) as response:
            data = json.loads(response.read().decode())
            return data
except urllib.error.URLError:
print_error("Connection refused: "+url)
exit()
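# Illustration (hypothetical values): the rest of this script expects these
# keys in the ipstack response returned by get_geo_data:
#   {"continent_name": "Europe", "country_name": "Germany",
#    "region_name": "Berlin", "zip": "10115",
#    "latitude": 52.53, "longitude": 13.38}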
class Statistics:
def __init__(self):
self.ips = []
self.sort = args.sort
self.update = args.update
if self.update:
print_message("Updating geolocation data!")
self.filename = args.filename
self.table = PrettyTable()
self.table.field_names = [
"IP Address",
"Times Connected",
"Failed Logins",
"Correct Logins",
"Continent Name",
"Country Name",
"Region Name",
"Zip",
"latitude",
"longitude",
]
if args.address is not None:
self.address = args.address
self.data = read_json_file(statistics)
for ip, stat in self.data.items():
self.ips.append(ip)
def show_report(self):
for ip in self.ips:
self.table.add_row(
[
ip,
self.data[ip]["Times Connected"],
self.data[ip]["Failed Logins"],
self.data[ip]["Correct Logins"],
self.data[ip]["Continent Name"],
self.data[ip]["Country Name"],
self.data[ip]["Region Name"],
self.data[ip]["Zip"],
self.data[ip]["latitude"],
self.data[ip]["longitude"],
]
)
        print(self.table.get_string(sortby=self.sort))
        if self.filename is not None:
            self.save()
def show_address_report(self):
try:
self.table.add_row(
[
self.address,
self.data[self.address]["Times Connected"],
self.data[self.address]["Failed Logins"],
self.data[self.address]["Correct Logins"],
self.data[self.address]["Continent Name"],
self.data[self.address]["Country Name"],
self.data[self.address]["Region Name"],
self.data[self.address]["Zip"],
self.data[self.address]["latitude"],
self.data[self.address]["longitude"],
]
)
except KeyError:
print_error("Address: " + self.address + " not found!")
exit()
print(self.table)
        if self.filename is not None:
            self.save()
def geolocation(self):
for ip in self.ips:
try:
_t = self.data[ip]["Zip"]
if self.update:
raise KeyError
except KeyError:
json_data = get_geo_data(ip)
if json_data is None:
                    print_warning(
                        "Could not fetch geolocation data; please put your API key in: "
                        + key_file
                    )
self.data[ip]["Continent Name"] = None
self.data[ip]["Country Name"] = None
self.data[ip]["Region Name"] = None
self.data[ip]["Zip"] = None
self.data[ip]["latitude"] = None
self.data[ip]["longitude"] = None
else:
self.data[ip]["Continent Name"] = json_data["continent_name"]
self.data[ip]["Country Name"] = json_data["country_name"]
self.data[ip]["Region Name"] = json_data["region_name"]
self.data[ip]["Zip"] = json_data["zip"]
self.data[ip]["latitude"] = json_data["latitude"]
self.data[ip]["longitude"] = json_data["longitude"]
def update_statistics_file(self):
with open(statistics, "w+") as f:
json.dump(self.data, f, indent=4, ensure_ascii=False)
def save(self):
html = self.table.get_html_string()
if self.filename is not None:
            if not self.filename.endswith(".html"):
                self.filename = self.filename + ".html"
            write_to_file(self.filename, html)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="dstat | Statistics tool for Dystopia")
parser.add_argument("--address", "-a", help="ip address to investigate")
parser.add_argument(
"--report",
"-r",
help="show a general report",
action="store_true",
default=False,
)
parser.add_argument("--sort", "-s", help="sort the report table by row name")
parser.add_argument(
"--update",
"-U",
help="update geolocation entries",
action="store_true",
default=False,
)
parser.add_argument("--filename", "-f", help="Filename of report file")
args = parser.parse_args()
s = Statistics()
    s.geolocation()
    s.update_statistics_file()
    if args.report:
        s.show_report()
    elif args.address is not None:
        s.show_address_report()
| python | 6,952 |
# Generated by Django 3.0.7 on 2021-02-17 14:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0106_auto_20210212_1645'),
]
operations = [
migrations.AddField(
model_name='entity',
name='legal_name',
field=models.CharField(blank=True, max_length=128, null=True),
),
migrations.AddField(
model_name='entity',
name='registered_address',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='entity',
name='registration_id',
field=models.CharField(blank=True, max_length=64, null=True),
),
migrations.AddField(
model_name='entity',
name='sustainability_officer',
field=models.CharField(blank=True, max_length=32, null=True),
),
migrations.AddField(
model_name='entity',
name='sustainability_officer_phone_number',
field=models.CharField(blank=True, max_length=32, null=True),
),
]
| python | 1,156 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, 2014 Scalr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gevent import monkey
monkey.patch_all()
import os
import sys
cwd = os.path.dirname(os.path.abspath(__file__))
scalrpy_dir = os.path.join(cwd, '..')
sys.path.insert(0, scalrpy_dir)
import re
import gzip
import socket
import gevent
import pymysql
import requests
import StringIO
import datetime
from pkg_resources import parse_version
from xml.dom import minidom
from scalrpy.util import rpc
from scalrpy.util import helper
from scalrpy.util import dbmanager
from scalrpy.util import cryptotool
from scalrpy.util import application
from scalrpy.util import schedule_parser
from scalrpy import LOG
from scalrpy import exceptions
helper.patch_gevent()
app = None
eol_os = ['ubuntu-10-04']
class SzrUpdClient(rpc.HttpServiceProxy):
def __init__(self, host, port, key, headers=None):
endpoint = 'http://%s:%s' % (host, port)
security = rpc.Security(key)
super(SzrUpdClient, self).__init__(endpoint, security=security, headers=headers)
class SzrUpdService(application.ScalrIterationApplication):
def __init__(self, argv=None):
self.description = "Scalr scalarizr update service"
super(SzrUpdService, self).__init__(argv=argv, interval=True)
self.config.update({
'interval': 600,
'chunk_size': 100,
'pool_size': 100,
})
self._db = None
self._pool = None
def configure(self):
helper.update_config(
self.scalr_config.get('scalarizr_update', {}).get('service', {}), self.config)
helper.validate_config(self.config)
if self.config['interval']:
self.iteration_timeout = int(self.config['interval'])
socket.setdefaulttimeout(self.config['instances_connection_timeout'])
self._db = dbmanager.ScalrDB(self.config['connections']['mysql'])
self._pool = helper.GPool(pool_size=self.config['pool_size'])
def clear_cache(self):
if hasattr(self.get_szr_ver_from_repo.im_func, 'cache'):
delattr(self.get_szr_ver_from_repo.im_func, 'cache')
if hasattr(self.get_szr_ver_from_repo.im_func, 'devel_cache'):
delattr(self.get_szr_ver_from_repo.im_func, 'devel_cache')
deb_pattern = re.compile(r'Package: scalarizr\n.*?Version:([ A-Za-z0-9.]*)-?.*\n.*?', re.DOTALL)
def ver_from_deb_repo(self, repo, branch=None):
out = {}
deb_repo_url_template = repo['deb_repo_url']
if deb_repo_url_template:
deb_repo_url_template = deb_repo_url_template.strip()
if branch:
deb_repo_url_template = deb_repo_url_template % branch
deb_repo_url = '/'.join(deb_repo_url_template.split())
url = os.path.join(deb_repo_url, 'Packages')
try:
r = requests.get(url)
r.raise_for_status()
assert r.text, 'Empty Packages file'
out[deb_repo_url_template] = self.deb_pattern.findall(r.text)[0].strip()
except (requests.exceptions.HTTPError, requests.exceptions.InvalidSchema):
msg = 'Deb repository {0} failed, file not found: {1}'
msg = msg.format(repo['deb_repo_url'], url)
LOG.warning(msg)
return out
rpm_pattern_1 = re.compile(
r'<package type="rpm">[\n ]*<name>scalarizr-base</name>.*?ver="([A-Za-z0-9.]*)-?.*".*?</package>',
re.DOTALL)
rpm_pattern_2 = re.compile(
r'<package type="rpm">[\n ]*<name>scalarizr</name>.*ver="([ A-Za-z0-9.]*)-?.*".*</package>',
re.DOTALL)
def ver_from_rpm_repo(self, repo, branch=None):
out = {}
rpm_repo_url_template = repo['rpm_repo_url']
if rpm_repo_url_template:
rpm_repo_url_template = rpm_repo_url_template.strip()
if branch:
rpm_repo_url_template = rpm_repo_url_template % branch
for release in ['5', '6', '7']:
rpm_repo_url = rpm_repo_url_template.replace('$releasever', release)
rpm_repo_url = rpm_repo_url.replace('$basearch', 'x86_64')
url = os.path.join(rpm_repo_url, 'repodata/primary.xml.gz')
try:
r = requests.get(url)
r.raise_for_status()
assert r.text, 'Empty primary.xml file'
s = StringIO.StringIO(r.content)
f = gzip.GzipFile(fileobj=s, mode='r')
f.seek(0)
xml = minidom.parse(f)
try:
out[rpm_repo_url_template] = self.rpm_pattern_1.findall(xml.toxml())[0].strip()
except:
out[rpm_repo_url_template] = self.rpm_pattern_2.findall(xml.toxml())[0].strip()
except (requests.exceptions.HTTPError, requests.exceptions.InvalidSchema):
msg = 'RPM repository {0} failed, file not found: {1}'
msg = msg.format(repo['rpm_repo_url'], url)
LOG.warning(msg)
return out
win_pattern = re.compile(r'scalarizr *scalarizr_(.*).exe*', re.DOTALL)
def ver_from_win_repo(self, repo, branch=None):
out = {}
win_repo_url_template = repo['win_repo_url']
if win_repo_url_template:
win_repo_url_template = win_repo_url_template.strip()
if branch:
win_repo_url = win_repo_url_template % branch
else:
win_repo_url = win_repo_url_template
url = os.path.join(win_repo_url, 'x86_64/index')
try:
r = requests.get(url)
r.raise_for_status()
assert r.text, 'Empty index file'
out[win_repo_url] = self.win_pattern.findall(r.text)[0].split('-')[0]
except (requests.exceptions.HTTPError, requests.exceptions.InvalidSchema):
msg = 'Win repository {0} failed, file not found: {1}'
msg = msg.format(repo['win_repo_url'], url)
LOG.warning(msg)
return out
def get_szr_ver_from_repo(self, devel_branch=None, force=False):
out = {}
if devel_branch:
if 'devel_repos' not in self.scalr_config['scalarizr_update']:
return out
if not force:
try:
return self.get_szr_ver_from_repo.im_func.devel_cache[devel_branch]
except AttributeError:
self.get_szr_ver_from_repo.im_func.devel_cache = {}
except KeyError:
pass
norm_branch = devel_branch.replace('/', '-').replace('_', '-')
repos = self.scalr_config['scalarizr_update']['devel_repos']
else:
if not force:
try:
return self.get_szr_ver_from_repo.im_func.cache
except AttributeError:
pass
norm_branch = None
repos = self.scalr_config['scalarizr_update']['repos']
for repo_type, repo in repos.iteritems():
for k, func in {
'deb': self.ver_from_deb_repo,
'rpm': self.ver_from_rpm_repo,
'win': self.ver_from_win_repo}.iteritems():
try:
data = func(repo, branch=norm_branch)
if data:
out.update(data)
out.setdefault(repo_type, {})[k] = data.values()[0]
except:
msg = '{0} repository {1} failed'.format(k, repo_type)
LOG.exception(msg)
if devel_branch:
self.get_szr_ver_from_repo.im_func.devel_cache[devel_branch] = out
else:
self.get_szr_ver_from_repo.im_func.cache = out
return out
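    # The method above memoizes on the underlying function object via the
    # Python 2 ``im_func`` attribute, roughly:
    #   try:
    #       return self.get_szr_ver_from_repo.im_func.cache
    #   except AttributeError:
    #       self.get_szr_ver_from_repo.im_func.cache = fetch_versions()
    # so the cache is shared across instances and is dropped once per
    # iteration by clear_cache() in before_iteration(). fetch_versions here
    # is a placeholder for the repo lookups above.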
def update_scalr_repo_data(self):
info = {}
vers = self.get_szr_ver_from_repo()
repos = self.scalr_config['scalarizr_update']['repos']
for repo_url in vers:
for repo in repos:
if repo_url in repos[repo].values():
info[repo] = vers[repo_url]
break
if not info:
return
query = (
"INSERT INTO settings "
"(id, value) "
"VALUES ('szr.repo.{name}', '{value}') "
"ON DUPLICATE KEY "
"UPDATE value = '{value}'"
)
for repo, vers in info.iteritems():
repo = pymysql.escape_string(repo)
self._db.execute(query.format(name=repo, value=vers))
def _get_db_servers(self):
if eol_os:
query = (
"SELECT s.server_id, s.farm_id, s.farm_roleid farm_role_id, s.remote_ip, s.local_ip, "
"s.platform, r.os_id "
"FROM servers s "
"JOIN farm_roles fr ON s.farm_roleid=fr.id "
"JOIN roles r ON fr.role_id=r.id "
"WHERE r.os_id NOT IN ({}) "
"AND s.status IN ('Running') "
"ORDER BY s.server_id".format(str(eol_os)[1:-1]))
else:
query = (
"SELECT server_id, farm_id, farm_roleid farm_role_id, "
"remote_ip, local_ip, platform "
"FROM servers "
"WHERE status IN ('Running') "
"ORDER BY server_id")
return self._db.execute_with_limit(query, 500, retries=1)
def _get_szr_upd_client(self, server):
key = cryptotool.decrypt_key(server['scalarizr.key'])
headers = {'X-Server-Id': server['server_id']}
instances_connection_policy = self.scalr_config.get(server['platform'], {}).get(
'instances_connection_policy', self.scalr_config['instances_connection_policy'])
ip, port, proxy_headers = helper.get_szr_updc_conn_info(
server, instances_connection_policy)
headers.update(proxy_headers)
szr_upd_client = SzrUpdClient(ip, port, key, headers=headers)
return szr_upd_client
def _get_status(self, server):
szr_upd_client = self._get_szr_upd_client(server)
timeout = self.config['instances_connection_timeout']
status = szr_upd_client.status(cached=True, timeout=timeout)
return status
def _get_statuses(self, servers):
async_results = {}
for server in servers:
if 'scalarizr.key' not in server:
msg = "Server: {0}, reason: Missing scalarizr key".format(server['server_id'])
LOG.warning(msg)
continue
if 'scalarizr.updc_port' not in server:
api_port = self.scalr_config['scalarizr_update'].get('api_port', 8008)
server['scalarizr.updc_port'] = api_port
self._pool.wait()
async_results[server['server_id']] = self._pool.apply_async(
self._get_status, (server,))
gevent.sleep(0) # force switch
statuses = {}
timeout = self.config['instances_connection_timeout']
for server in servers:
try:
server_id = server['server_id']
statuses[server_id] = async_results[server_id].get(timeout=timeout)
except:
msg = 'Unable to get update client status, server: {0}, reason: {1}'
msg = msg.format(server['server_id'], helper.exc_info())
LOG.warning(msg)
return statuses
def _load_servers_data(self, servers):
props = ('scalarizr.key', 'scalarizr.updc_port', 'scalarizr.version')
self._db.load_server_properties(servers, props)
farms = [{'id': __} for __ in set(_['farm_id'] for _ in servers)]
props = ('szr.upd.schedule',)
self._db.load_farm_settings(farms, props)
farms_map = dict((_['id'], _) for _ in farms)
farms_roles = [{'id': __} for __ in set(_['farm_role_id'] for _ in servers)]
props = ['base.upd.schedule', 'scheduled_on', 'user-data.scm_branch']
self._db.load_farm_role_settings(farms_roles, props)
farms_roles_map = dict((_['id'], _) for _ in farms_roles)
for server in servers:
schedule = farms_roles_map.get(
server['farm_role_id'], {}).get('base.upd.schedule', None)
if not schedule:
schedule = farms_map.get(
server['farm_id'], {}).get('szr.upd.schedule', '* * *')
server['schedule'] = schedule
server['scheduled_on'] = str(farms_roles_map.get(
server['farm_role_id'], {}).get('scheduled_on', None))
server['user-data.scm_branch'] = farms_roles_map.get(
server['farm_role_id'], {}).get('user-data.scm_branch', None)
return servers
def _set_next_update_dt(self, servers):
for server in servers:
next_update_dt = str(self._scheduled_on(server['schedule']))
if next_update_dt != server['scheduled_on']:
query = (
"""INSERT INTO farm_role_settings """
"""(farm_roleid, name, value) """
"""VALUES ({0}, 'scheduled_on', '{1}') """
"""ON DUPLICATE KEY UPDATE value='{1}'"""
).format(server['farm_role_id'], next_update_dt)
msg = "Set next update datetime for server: {0} to: {1}"
msg = msg.format(server['server_id'], next_update_dt)
LOG.debug(msg)
try:
self._db.execute(query, retries=1)
except:
msg = 'Unable to update next update datetime for server: {0}, reason: {1}'
msg = msg.format(server['server_id'], helper.exc_info())
LOG.warning(msg)
def _get_servers_scheduled_for_update(self, servers):
servers_scheduled_for_update = []
for server in servers:
try:
version = server['scalarizr.version']
if not version or parse_version(version) < parse_version('2.7.7'):
continue
if not schedule_parser.Schedule(server['schedule']).intime():
continue
servers_scheduled_for_update.append(server)
except:
msg = "Server: {0}, reason: {1}".format(server['server_id'], helper.exc_info())
LOG.warning(msg)
continue
return servers_scheduled_for_update
def _is_server_for_update(self, server, status):
repo_url = status['repo_url']
devel_branch = server.get('user-data.scm_branch', None)
ver_info = self.get_szr_ver_from_repo(devel_branch=devel_branch)
try:
szr_ver_repo = ver_info[repo_url]
except KeyError:
pkg_type = helper.pkg_type_by_name(status['dist'].split()[0])
szr_ver_repo = ver_info[status['repository']][pkg_type]
if parse_version(server['scalarizr.version']) >= parse_version(szr_ver_repo):
return False
if 'in-progress' in status['state']:
# skip in-progress server
return False
if status['executed_at']:
last_update_dt = datetime.datetime.strptime(
status['executed_at'], '%a %d %b %Y %H:%M:%S %Z')
last_update_dt = last_update_dt.replace(minute=0, second=0, microsecond=0)
utcnow_dt = datetime.datetime.utcnow()
utcnow_dt = utcnow_dt.replace(minute=0, second=0, microsecond=0)
if last_update_dt == utcnow_dt and status['state'] == 'error':
# skip failed server
LOG.debug(
'Skip server: {0}, reason: server in error state'.format(server['server_id']))
return False
return True
def _scheduled_on(self, schedule):
dt = datetime.datetime.utcnow()
delta = datetime.timedelta(hours=1)
for _ in xrange(0, 24 * 31 * 365):
dt = dt + delta
if schedule_parser.Schedule(schedule).intime(now=dt.timetuple()):
return dt.replace(minute=0, second=0, microsecond=0)
def get_servers_for_update(self):
servers_for_update_high_pri = []
servers_for_update_low_pri = []
for servers in self._get_db_servers():
self._load_servers_data(servers)
self._set_next_update_dt(servers)
servers_scheduled_for_update = self._get_servers_scheduled_for_update(servers)
self._db.load_vpc_settings(servers_scheduled_for_update)
statuses = self._get_statuses(servers_scheduled_for_update)
for server in servers_scheduled_for_update:
try:
if server['server_id'] not in statuses:
continue
if not self._is_server_for_update(server, statuses[server['server_id']]):
continue
if server['schedule'] == '* * *':
free = self.config['chunk_size'] - len(servers_for_update_high_pri)
if len(servers_for_update_low_pri) < free:
servers_for_update_low_pri.append(server)
else:
servers_for_update_high_pri.append(server)
if len(servers_for_update_high_pri) >= self.config['chunk_size']:
break
except:
msg = "Server: {0}, reason: {1}".format(server['server_id'], helper.exc_info())
LOG.warning(msg)
else:
continue
break
if len(servers_for_update_high_pri) < self.config['chunk_size']:
servers_for_update = servers_for_update_high_pri + servers_for_update_low_pri
servers_for_update = servers_for_update[0:self.config['chunk_size']]
else:
servers_for_update = servers_for_update_high_pri
return servers_for_update
def update_server(self, server):
try:
szr_upd_client = self._get_szr_upd_client(server)
timeout = self.config['instances_connection_timeout']
msg = "Trying to update server: {0}, version: {1}".format(
server['server_id'], server['scalarizr.version'])
LOG.debug(msg)
try:
result_id = szr_upd_client.update(async=True, timeout=timeout)
except:
msg = 'Unable to update, reason: {0}'.format(helper.exc_info())
raise Exception(msg)
LOG.debug("Server: {0}, result: {1}".format(server['server_id'], result_id))
except:
msg = "Server failed: {0}, reason: {1}".format(server['server_id'], helper.exc_info())
LOG.warning(msg)
def before_iteration(self):
self.load_config()
self.configure()
self.clear_cache()
def do_iteration(self):
servers = self.get_servers_for_update()
for server in servers:
try:
self._pool.wait()
self._pool.apply_async(self.update_server, (server,))
gevent.sleep(0) # force switch
except:
LOG.warning(helper.exc_info())
self._pool.join()
try:
self.update_scalr_repo_data()
except:
msg = 'Unable to update scalr.settings table, reason: {0}'.format(helper.exc_info())
LOG.error(msg)
def after_iteration(self):
self._pool.kill()
def main():
global app
app = SzrUpdService()
try:
app.load_config()
app.configure()
app.run()
except exceptions.AlreadyRunningError:
LOG.info(helper.exc_info(where=False))
except (SystemExit, KeyboardInterrupt):
pass
except:
LOG.exception('Oops')
if __name__ == '__main__':
main()
| python | 20,614 |
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
from loguru import logger
from frontend.login import LoginForm
from frontend.main import MainPHWindow
from components.sgph_module import do_login
from exceptions.internet_exception import *
from exceptions.sgph_exception import *
from exceptions.phman_exception import *
from components.utils.thread_util import execute_background
from components.db.db_manager import save_user, get_current_device, get_current_user
from components.sgph_module import get_current_rol, get_persona
from components.phman_module import check_guard_permissions
class BiometricController():
def __init__(self, biometric_tab):
self.biometric_tab = biometric_tab
def search_person(self):
try:
person = get_persona(self.biometric_tab.line_edit_identification.text(), self.biometric_tab.combo_box_identification_type.currentText(),
self.biometric_tab.combo_box_rol.currentText(), get_current_user().idPropiedadHorizontal)
            # Build "nombres apellido", falling back to just nombres when
            # apellido is missing.
            full_name = (person.nombres if person.apellido is None
                         else person.nombres + " " + person.apellido)
            self.biometric_tab.line_edit_person_name.setText(full_name)
except NoPersonFound as person_no_found:
QtWidgets.QMessageBox.warning(
None, 'No se pudo completar la operacion', str(person_no_found))
        except Exception as fatal_excep:
            logger.exception(fatal_excep)
            QtWidgets.QMessageBox.warning(
                None, 'No se pudo completar la operacion', str(fatal_excep))
def run():
from frontend.main import MainPHWindow
from frontend.controllers.main_control import MainController
app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
main = MainPHWindow()
main_controller = MainController(main)
main.setupUi(window, main_controller)
biometric_controller = BiometricController(main)
main.set_biometric_controller(biometric_controller)
window.show()
sys.exit(app.exec_())
| python | 1,985 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..resampling import ApplyTransformsToPoints
def test_ApplyTransformsToPoints_inputs():
input_map = dict(args=dict(argstr='%s',
),
dimension=dict(argstr='--dimensionality %d',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
input_file=dict(argstr='--input %s',
mandatory=True,
),
invert_transform_flags=dict(),
num_threads=dict(nohash=True,
usedefault=True,
),
output_file=dict(argstr='--output %s',
hash_files=False,
name_source=['input_file'],
name_template='%s_transformed.csv',
),
terminal_output=dict(nohash=True,
),
transforms=dict(argstr='%s',
mandatory=True,
),
)
inputs = ApplyTransformsToPoints.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ApplyTransformsToPoints_outputs():
output_map = dict(output_file=dict(),
)
outputs = ApplyTransformsToPoints.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| python | 1,382 |
"""Players spreadsheet table"""
import re
from discord.ext import commands
from .base_spreadsheet import BaseSpreadsheet
from common.api.spreadsheet import (
find_corresponding_cell_best_effort_from_range,
find_corresponding_cells_best_effort_from_range,
Cell,
from_letter_base,
)
class PlayersSpreadsheet(BaseSpreadsheet):
"""Players spreadsheet class"""
def __init__(self, session=None, *args, **kwargs):
super().__init__(session, *args, **kwargs)
self._type = "players"
__tablename__ = "players_spreadsheet"
range_team_name = str()
range_team = str()
range_discord = str()
range_discord_id = str()
range_rank = str()
range_bws_rank = str()
range_osu_id = str()
range_pp = str()
range_country = str()
range_timezone = str()
max_range_for_teams = int(0)
class TeamNotFound(commands.CommandError):
    """Thrown when a team is not found."""
def __init__(self, team):
self.team = team
class DuplicateTeam(commands.CommandError):
    """Thrown when a team is found multiple times."""
def __init__(self, team):
self.team = team
class TeamInfo:
"""Contains all info about a team."""
class PlayerInfo:
def __init__(
self,
name,
discord=None,
discord_id=None,
rank=None,
bws_rank=None,
osu_id=None,
pp=None,
country=None,
is_captain=False,
):
self.name = name
self.discord = discord if discord else Cell(-1, -1, "")
self.discord.value_type = str
self.discord_id = discord_id if discord_id else Cell(-1, -1, 0)
self.discord_id.value_type = int
self.rank = rank if rank else Cell(-1, -1, "")
self.rank.value_type = str
self.bws_rank = bws_rank if bws_rank else Cell(-1, -1, "")
self.bws_rank.value_type = str
self.osu_id = osu_id if osu_id else Cell(-1, -1, "")
self.osu_id.value_type = str
self.pp = pp if pp else Cell(-1, -1, "")
self.pp.value_type = str
self.country = country if country else Cell(-1, -1, "")
self.country.value_type = str
self.is_captain = is_captain
def __init__(self, team_name_cell):
self.team_name = team_name_cell
self.team_name.value_type = str
self.players = []
self.timezone = Cell(-1, -1, "")
def add_player(self, player_info):
if not self.players:
player_info.is_captain = True
self.players.append(player_info)
def set_timezone(self, timezone_cell):
self.timezone = timezone_cell
self.timezone.value_type = str
def find_player(self, name, discord_id, discord):
for player in self.players:
if discord_id and discord_id == player.discord_id:
return player
elif discord and discord == player.discord:
return player
elif name and name.casefold() == player.name.casefold():
return player
return None
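    # Lookup precedence in find_player above: a matching discord_id wins,
    # then a matching discord handle, then a case-insensitive name match;
    # None is returned when nothing matches.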
def get_team_captain(self):
for player in self.players:
if player.is_captain:
return player
return self.players[0]
@staticmethod
def from_player_name(players_spreadsheet, player_name):
player_name = str(player_name)
player_cells = players_spreadsheet.spreadsheet.find_cells(players_spreadsheet.range_team, player_name)
if not player_cells:
raise TeamNotFound(player_name)
# ? To keep ?
# if len(player_cells) > 1:
# raise DuplicateTeam(player_name)
player_cell = player_cells[0]
return TeamInfo.from_player_cell(players_spreadsheet, player_cell)
@staticmethod
def from_player_cell(players_spreadsheet, player_cell):
team_info = TeamInfo(player_cell)
discord = find_corresponding_cell_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_discord,
player_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
discord_id = find_corresponding_cell_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_discord_id,
player_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
rank = find_corresponding_cell_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_rank,
player_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
bws_rank = find_corresponding_cell_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_bws_rank,
player_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
osu_id = find_corresponding_cell_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_osu_id,
player_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
pp = find_corresponding_cell_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_pp,
player_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
country = find_corresponding_cell_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_country,
player_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
team_info.set_timezone(
find_corresponding_cell_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_timezone,
player_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
)
team_info.add_player(TeamInfo.PlayerInfo(player_cell, discord, discord_id, rank, bws_rank, osu_id, pp, country))
return team_info
@staticmethod
def from_discord_id(players_spreadsheet, discord_id):
discord_id = str(discord_id)
discord_id_cells = players_spreadsheet.spreadsheet.find_cells(players_spreadsheet.range_discord_id, discord_id)
if not discord_id_cells:
raise TeamNotFound(discord_id)
discord_id_cell = discord_id_cells[0]
return TeamInfo.from_discord_id_cell(players_spreadsheet, discord_id_cell)
@staticmethod
def from_discord_id_cell(players_spreadsheet, discord_id_cell):
if players_spreadsheet.range_team_name:
team_name_cell = find_corresponding_cell_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_team_name,
discord_id_cell,
)
if team_name_cell.x == -1:
raise TeamNotFound(discord_id_cell.get())
return TeamInfo.from_team_name_cell(players_spreadsheet, team_name_cell)
else:
player_cell = find_corresponding_cell_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_team,
discord_id_cell,
)
if player_cell.x == -1:
raise TeamNotFound(discord_id_cell.get())
return TeamInfo.from_player_cell(players_spreadsheet, player_cell)
@staticmethod
def from_team_name(players_spreadsheet, team_name):
team_name = str(team_name)
if not players_spreadsheet.range_team_name:
return TeamInfo.from_player_name(players_spreadsheet, team_name)
team_name_cells = players_spreadsheet.spreadsheet.find_cells(players_spreadsheet.range_team_name, team_name)
if not team_name_cells:
raise TeamNotFound(team_name)
# ? To keep ?
# if len(team_name_cells) > 1:
# raise DuplicateTeam(team_name)
team_name_cell = team_name_cells[0]
return TeamInfo.from_team_name_cell(players_spreadsheet, team_name_cell)
@staticmethod
def from_team_name_cell(players_spreadsheet, team_name_cell):
team_info = TeamInfo(team_name_cell)
players_data = []
players_data.append(
find_corresponding_cells_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_team,
team_name_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
to_string=True,
)
)
players_data.append(
find_corresponding_cells_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_discord,
team_name_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
)
players_data.append(
find_corresponding_cells_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_discord_id,
team_name_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
)
players_data.append(
find_corresponding_cells_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_rank,
team_name_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
)
players_data.append(
find_corresponding_cells_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_bws_rank,
team_name_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
)
players_data.append(
find_corresponding_cells_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_osu_id,
team_name_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
)
players_data.append(
find_corresponding_cells_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_pp,
team_name_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
)
players_data.append(
find_corresponding_cells_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_country,
team_name_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)
)
# TODO: only one cell needed / maybe a new function needed ?
team_info.timezone = find_corresponding_cells_best_effort_from_range(
players_spreadsheet.spreadsheet,
players_spreadsheet.range_timezone,
team_name_cell,
max_difference_with_base=players_spreadsheet.max_range_for_teams,
)[0]
        for name, discord, discord_id, rank, bws_rank, osu_id, pp, country in zip(*players_data):
team_info.add_player(TeamInfo.PlayerInfo(name, discord, discord_id, rank, bws_rank, osu_id, pp, country))
return team_info
@staticmethod
def get_first_blank_fields(players_spreadsheet):
range_to_use = None
if players_spreadsheet.range_team_name:
range_to_use = players_spreadsheet.range_team_name
else:
range_to_use = players_spreadsheet.range_team
cells = players_spreadsheet.spreadsheet.get_range(range_to_use)
if not cells:
worksheet, range_to_use = players_spreadsheet.spreadsheet.get_worksheet_and_range(range_to_use)
splitted_range = range_to_use.split(":")[0]
            column, row, _ = re.split(r"(\d+)", splitted_range)  # TODO: handle all kinds of ranges
cells = [[Cell(from_letter_base(column), int(row) - 1, "")]]
worksheet.cells = cells
used = False
for row in cells:
used = False
for cell in row:
if cell:
used = True
break
if not used:
break
if used:
worksheet, _ = players_spreadsheet.spreadsheet.get_worksheet_and_range(range_to_use)
cells = worksheet.cells
row = [Cell(row[0].x, row[0].y + 1, "")]
cells.append(row)
if players_spreadsheet.range_team_name:
return TeamInfo.from_team_name_cell(players_spreadsheet, row[0])
else:
return TeamInfo.from_player_cell(players_spreadsheet, row[0])
| python | 13,226 |
# coding: utf-8
import pprint
import re
import six
class ShowTemplateFileRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'template_id': 'str',
'file_path': 'str',
'type': 'str'
}
attribute_map = {
'x_language': 'X-Language',
'template_id': 'template_id',
'file_path': 'file_path',
'type': 'type'
}
def __init__(self, x_language='zh-cn', template_id=None, file_path=None, type='source-package'):
"""ShowTemplateFileRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._template_id = None
self._file_path = None
self._type = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
self.template_id = template_id
self.file_path = file_path
if type is not None:
self.type = type
@property
def x_language(self):
"""Gets the x_language of this ShowTemplateFileRequest.
:return: The x_language of this ShowTemplateFileRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this ShowTemplateFileRequest.
:param x_language: The x_language of this ShowTemplateFileRequest.
:type: str
"""
self._x_language = x_language
@property
def template_id(self):
"""Gets the template_id of this ShowTemplateFileRequest.
:return: The template_id of this ShowTemplateFileRequest.
:rtype: str
"""
return self._template_id
@template_id.setter
def template_id(self, template_id):
"""Sets the template_id of this ShowTemplateFileRequest.
:param template_id: The template_id of this ShowTemplateFileRequest.
:type: str
"""
self._template_id = template_id
@property
def file_path(self):
"""Gets the file_path of this ShowTemplateFileRequest.
:return: The file_path of this ShowTemplateFileRequest.
:rtype: str
"""
return self._file_path
@file_path.setter
def file_path(self, file_path):
"""Sets the file_path of this ShowTemplateFileRequest.
:param file_path: The file_path of this ShowTemplateFileRequest.
:type: str
"""
self._file_path = file_path
@property
def type(self):
"""Gets the type of this ShowTemplateFileRequest.
:return: The type of this ShowTemplateFileRequest.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ShowTemplateFileRequest.
:param type: The type of this ShowTemplateFileRequest.
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowTemplateFileRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
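# Illustrative usage (hypothetical values): only template_id and file_path
# are required; x_language and type fall back to their defaults.
#   req = ShowTemplateFileRequest(template_id='tpl-123', file_path='src/main.py')
#   req.to_dict()['type']  # -> 'source-package'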
| python | 4,705 |
__all__ = ["set_python_seed"]
def set_python_seed(seed):
import random
random.seed(seed)
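# Illustrative usage: reseeding with the same value reproduces the sequence.
#   set_python_seed(42)
#   import random; first = random.random()
#   set_python_seed(42)
#   assert random.random() == first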
| python | 99 |
from typing import Dict
import torch
import torch.nn as nn
urls: Dict[str, str] = {}
urls[
"liberty"
] = "https://github.com/vbalnt/tfeat/raw/master/pretrained-models/tfeat-liberty.params" # pylint: disable
urls[
"notredame"
] = "https://github.com/vbalnt/tfeat/raw/master/pretrained-models/tfeat-notredame.params" # pylint: disable
urls[
"yosemite"
] = "https://github.com/vbalnt/tfeat/raw/master/pretrained-models/tfeat-yosemite.params" # pylint: disable
class TFeat(nn.Module):
r"""Module, which computes TFeat descriptors of given grayscale patches of 32x32.
This is based on the original code from paper "Learning local feature descriptors
with triplets and shallow convolutional neural networks".
See :cite:`TFeat2016` for more details
Args:
pretrained: Download and set pretrained weights to the model.
Returns:
TFeat descriptor of the patches.
Shape:
- Input: (B, 1, 32, 32)
- Output: (B, 128)
Examples:
>>> input = torch.rand(16, 1, 32, 32)
>>> tfeat = TFeat()
>>> descs = tfeat(input) # 16x128
"""
def __init__(self, pretrained: bool = False) -> None:
super().__init__()
self.features = nn.Sequential(
nn.InstanceNorm2d(1, affine=False),
nn.Conv2d(1, 32, kernel_size=7),
nn.Tanh(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 64, kernel_size=6),
nn.Tanh(),
)
self.descr = nn.Sequential(nn.Linear(64 * 8 * 8, 128), nn.Tanh())
# use torch.hub to load pretrained model
if pretrained:
pretrained_dict = torch.hub.load_state_dict_from_url(
urls['liberty'], map_location=lambda storage, loc: storage
)
self.load_state_dict(pretrained_dict, strict=True)
self.eval()
def forward(self, input: torch.Tensor) -> torch.Tensor:
x = self.features(input)
x = x.view(x.size(0), -1)
x = self.descr(x)
return x
| python | 2,050 |
"""
Unit tests for optimization routines from optimize.py
Authors:
Ed Schofield, Nov 2005
Andrew Straw, April 2008
To run it in its simplest form::
nosetests test_optimize.py
"""
from __future__ import division, print_function, absolute_import
import itertools
import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
assert_,
assert_almost_equal, assert_warns,
assert_array_less)
import pytest
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy import optimize
def test_check_grad():
# Verify if check_grad is able to estimate the derivative of the
# logistic function.
def logit(x):
return 1 / (1 + np.exp(-x))
def der_logit(x):
return np.exp(-x) / (1 + np.exp(-x))**2
x0 = np.array([1.5])
r = optimize.check_grad(logit, der_logit, x0)
assert_almost_equal(r, 0)
r = optimize.check_grad(logit, der_logit, x0, epsilon=1e-6)
assert_almost_equal(r, 0)
# Check if the epsilon parameter is being considered.
r = abs(optimize.check_grad(logit, der_logit, x0, epsilon=1e-1) - 0)
assert_(r > 1e-7)
class CheckOptimize(object):
""" Base test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def setup_method(self):
self.F = np.array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]])
self.K = np.array([1., 0.3, 0.5])
self.startparams = np.zeros(3, np.float64)
self.solution = np.array([0., -0.524869316, 0.487525860])
self.maxiter = 1000
self.funccalls = 0
self.gradcalls = 0
self.trace = []
def func(self, x):
self.funccalls += 1
if self.funccalls > 6000:
raise RuntimeError("too many iterations in optimization routine")
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
f = logZ - np.dot(self.K, x)
self.trace.append(x)
return f
def grad(self, x):
self.gradcalls += 1
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.transpose(), p) - self.K
def hess(self, x):
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.T,
np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))
def hessp(self, x, p):
return np.dot(self.hess(x), p)
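# Illustrative sketch (not part of the original suite): the analytic gradient
# used by CheckOptimize can be sanity-checked against finite differences with
# optimize.check_grad, mirroring test_check_grad above. F, K and the start
# point follow setup_method.
def _demo_check_maxent_grad():
    F = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 1], [1, 0, 0], [1, 0, 0]])
    K = np.array([1., 0.3, 0.5])
    def func(x):
        log_pdot = np.dot(F, x)
        return np.log(sum(np.exp(log_pdot))) - np.dot(K, x)
    def grad(x):
        log_pdot = np.dot(F, x)
        p = np.exp(log_pdot - np.log(sum(np.exp(log_pdot))))
        return np.dot(F.T, p) - K
    # check_grad returns the 2-norm of the difference between the analytic
    # and finite-difference gradients; it should be tiny (~1e-7 or less).
    return optimize.check_grad(func, grad, np.zeros(3))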
class CheckOptimizeParameterized(CheckOptimize):
def test_cg(self):
# conjugate gradient optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='CG', jac=self.grad,
options=opts)
params, fopt, func_calls, grad_calls, warnflag = \
res['x'], res['fun'], res['nfev'], res['njev'], res['status']
else:
retval = optimize.fmin_cg(self.func, self.startparams,
self.grad, (), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 9, self.funccalls)
assert_(self.gradcalls == 7, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[2:4],
[[0, -0.5, 0.5],
[0, -5.05700028e-01, 4.95985862e-01]],
atol=1e-14, rtol=1e-7)
def test_cg_cornercase(self):
def f(r):
return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2
# Check several initial guesses. (Too far away from the
# minimum, the function ends up in the flat region of exp.)
for x0 in np.linspace(-0.75, 3, 71):
sol = optimize.minimize(f, [x0], method='CG')
assert_(sol.success)
assert_allclose(sol.x, [0.5], rtol=1e-5)
def test_bfgs(self):
# Broyden-Fletcher-Goldfarb-Shanno optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams,
jac=self.grad, method='BFGS', args=(),
options=opts)
params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = (
res['x'], res['fun'], res['jac'], res['hess_inv'],
res['nfev'], res['njev'], res['status'])
else:
retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 10, self.funccalls)
assert_(self.gradcalls == 8, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[6:8],
[[0, -5.25060743e-01, 4.87748473e-01],
[0, -5.24885582e-01, 4.87530347e-01]],
atol=1e-14, rtol=1e-7)
def test_bfgs_infinite(self):
# Test corner case where -Inf is the minimum. See gh-2019.
func = lambda x: -np.e**-x
fprime = lambda x: -func(x)
x0 = [0]
olderr = np.seterr(over='ignore')
try:
if self.use_wrapper:
opts = {'disp': self.disp}
x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
args=(), options=opts)['x']
else:
x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp)
assert_(not np.isfinite(func(x)))
finally:
np.seterr(**olderr)
def test_powell(self):
# Powell (direction set) optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Powell', options=opts)
params, fopt, direc, numiter, func_calls, warnflag = (
res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
else:
retval = optimize.fmin_powell(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, direc, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
#
# However, some leeway must be added: the exact evaluation
# count is sensitive to numerical error, and floating-point
# computations are not bit-for-bit reproducible across
# machines, and when using e.g. MKL, data alignment
# etc. affect the rounding error.
#
assert_(self.funccalls <= 116 + 20, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[34:39],
[[0.72949016, -0.44156936, 0.47100962],
[0.72949016, -0.44156936, 0.48052496],
[1.45898031, -0.88313872, 0.95153458],
[0.72949016, -0.44156936, 0.47576729],
[1.72949016, -0.44156936, 0.47576729]],
atol=1e-14, rtol=1e-7)
def test_neldermead(self):
# Nelder-Mead simplex algorithm
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
params, fopt, numiter, func_calls, warnflag, final_simplex = (
res['x'], res['fun'], res['nit'], res['nfev'],
res['status'], res['final_simplex'])
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 167, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[76:78],
[[0.1928968, -0.62780447, 0.35166118],
[0.19572515, -0.63648426, 0.35838135]],
atol=1e-14, rtol=1e-7)
def test_neldermead_initial_simplex(self):
# Nelder-Mead simplex algorithm
simplex = np.zeros((4, 3))
simplex[...] = self.startparams
for j in range(3):
simplex[j+1,j] += 0.1
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': True, 'initial_simplex': simplex}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
params, fopt, numiter, func_calls, warnflag = \
res['x'], res['fun'], res['nit'], res['nfev'], \
res['status']
assert_allclose(res['allvecs'][0], simplex[0])
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False,
initial_simplex=simplex)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.17.0. Don't allow them to increase.
assert_(self.funccalls == 100, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.15.0
assert_allclose(self.trace[50:52],
[[0.14687474, -0.5103282, 0.48252111],
[0.14474003, -0.5282084, 0.48743951]],
atol=1e-14, rtol=1e-7)
def test_neldermead_initial_simplex_bad(self):
        # Check that it fails with bad simplices
bad_simplices = []
simplex = np.zeros((3, 2))
simplex[...] = self.startparams[:2]
for j in range(2):
simplex[j+1,j] += 0.1
bad_simplices.append(simplex)
simplex = np.zeros((3, 3))
bad_simplices.append(simplex)
for simplex in bad_simplices:
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': False, 'initial_simplex': simplex}
assert_raises(ValueError,
optimize.minimize, self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
else:
assert_raises(ValueError, optimize.fmin, self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False,
initial_simplex=simplex)
def test_ncg_negative_maxiter(self):
# Regression test for gh-8241
opts = {'maxiter': -1}
result = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts)
assert_(result.status == 1)
def test_ncg(self):
# line-search Newton conjugate gradient optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 22, self.gradcalls) # 0.13.0
#assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
#assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
#assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hess(self):
# Newton conjugate gradient with Hessian
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hess=self.hess,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess=self.hess,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
# assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
# assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hessp(self):
# Newton conjugate gradient with Hessian times a vector p.
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hessp=self.hessp,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess_p=self.hessp,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
# assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
# assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_neldermead_xatol_fatol():
# gh4484
# test we can call with fatol, xatol specified
func = lambda x: x[0]**2 + x[1]**2
optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2,
xatol=1e-3, fatol=1e-3)
assert_warns(DeprecationWarning,
optimize._minimize._minimize_neldermead,
func, [1, 1], xtol=1e-3, ftol=1e-3, maxiter=2)
def test_neldermead_adaptive():
func = lambda x: np.sum(x**2)
p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159, 0.32308456,
0.9692297, 0.4471682, 0.77411992, 0.80441652, 0.35994957, 0.75487856,
0.99973421, 0.65063887, 0.09626474]
res = optimize.minimize(func, p0, method='Nelder-Mead')
assert_equal(res.success, False)
res = optimize.minimize(func, p0, method='Nelder-Mead',
options={'adaptive':True})
assert_equal(res.success, True)
class TestOptimizeWrapperDisp(CheckOptimizeParameterized):
use_wrapper = True
disp = True
class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized):
use_wrapper = True
disp = False
class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized):
use_wrapper = False
disp = True
class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized):
use_wrapper = False
disp = False
class TestOptimizeSimple(CheckOptimize):
def test_bfgs_nan(self):
# Test corner case where nan is fed to optimizer. See gh-2067.
func = lambda x: x
fprime = lambda x: np.ones_like(x)
x0 = [np.nan]
with np.errstate(over='ignore', invalid='ignore'):
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert_(np.isnan(func(x)))
def test_bfgs_nan_return(self):
# Test corner cases where fun returns NaN. See gh-4793.
# First case: NaN from first call.
func = lambda x: np.nan
with np.errstate(invalid='ignore'):
result = optimize.minimize(func, 0)
assert_(np.isnan(result['fun']))
assert_(result['success'] is False)
# Second case: NaN from second call.
func = lambda x: 0 if x == 0 else np.nan
fprime = lambda x: np.ones_like(x) # Steer away from zero.
with np.errstate(invalid='ignore'):
result = optimize.minimize(func, 0, jac=fprime)
assert_(np.isnan(result['fun']))
assert_(result['success'] is False)
def test_bfgs_numerical_jacobian(self):
# BFGS with numerical jacobian and a vector epsilon parameter.
# define the epsilon parameter using a random vector
epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution))
params = optimize.fmin_bfgs(self.func, self.startparams,
epsilon=epsilon, args=(),
maxiter=self.maxiter, disp=False)
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_bfgs_gh_2169(self):
def f(x):
if x < 0:
return 1.79769313e+308
else:
return x + 1./x
xs = optimize.fmin_bfgs(f, [10.], disp=False)
assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4)
def test_l_bfgs_b(self):
# limited-memory bound-constrained BFGS algorithm
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
self.grad, args=(),
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls == 5, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[3:5],
[[0., -0.52489628, 0.48753042],
[0., -0.52489628, 0.48753042]],
atol=1e-14, rtol=1e-7)
def test_l_bfgs_b_numjac(self):
# L-BFGS-B with numerical jacobian
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
approx_grad=True,
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_funjac(self):
# L-BFGS-B with combined objective function and jacobian
def fun(x):
return self.func(x), self.grad(x)
retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_maxiter(self):
# gh7854
        # Ensure that no more than maxiter iterations are ever run.
class Callback(object):
def __init__(self):
self.nit = 0
self.fun = None
self.x = None
def __call__(self, x):
self.x = x
self.fun = optimize.rosen(x)
self.nit += 1
c = Callback()
res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b',
callback=c, options={'maxiter': 5})
assert_equal(res.nit, 5)
assert_almost_equal(res.x, c.x)
assert_almost_equal(res.fun, c.fun)
assert_equal(res.status, 1)
assert_(res.success is False)
assert_equal(res.message.decode(), 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT')
def test_minimize_l_bfgs_b(self):
# Minimize with L-BFGS-B method
opts = {'disp': False, 'maxiter': self.maxiter}
r = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
options=opts)
assert_allclose(self.func(r.x), self.func(self.solution),
atol=1e-6)
# approximate jacobian
ra = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', options=opts)
assert_allclose(self.func(ra.x), self.func(self.solution),
atol=1e-6)
# check that function evaluations in approximate jacobian are counted
assert_(ra.nfev > r.nfev)
def test_minimize_l_bfgs_b_ftol(self):
# Check that the `ftol` parameter in l_bfgs_b works as expected
v0 = None
for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol}
sol = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
options=opts)
v = self.func(sol.x)
if v0 is None:
v0 = v
else:
assert_(v < v0)
assert_allclose(v, self.func(self.solution), rtol=tol)
def test_minimize_l_bfgs_maxls(self):
        # check that maxls is passed down to the Fortran routine
sol = optimize.minimize(optimize.rosen, np.array([-1.2,1.0]),
method='L-BFGS-B', jac=optimize.rosen_der,
options={'disp': False, 'maxls': 1})
assert_(not sol.success)
def test_minimize_l_bfgs_b_maxfun_interruption(self):
# gh-6162
f = optimize.rosen
g = optimize.rosen_der
values = []
x0 = np.ones(7) * 1000
def objfun(x):
value = f(x)
values.append(value)
return value
# Look for an interesting test case.
# Request a maxfun that stops at a particularly bad function
# evaluation somewhere between 100 and 300 evaluations.
low, medium, high = 30, 100, 300
optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
v, k = max((y, i) for i, y in enumerate(values[medium:]))
maxfun = medium + k
# If the minimization strategy is reasonable,
# the minimize() result should not be worse than the best
# of the first 30 function evaluations.
target = min(values[:low])
xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
assert_array_less(fmin, target)
def test_custom(self):
# This function comes from the documentation example.
def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
maxiter=100, callback=None, **options):
bestx = x0
besty = fun(x0)
funcalls = 1
niter = 0
improved = True
stop = False
while improved and not stop and niter < maxiter:
improved = False
niter += 1
for dim in range(np.size(x0)):
for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
testx = np.copy(bestx)
testx[dim] = s
testy = fun(testx, *args)
funcalls += 1
if testy < besty:
besty = testy
bestx = testx
improved = True
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
stop = True
break
return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
nfev=funcalls, success=(niter > 1))
x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
res = optimize.minimize(optimize.rosen, x0, method=custmin,
options=dict(stepsize=0.05))
assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)
def test_minimize_tol_parameter(self):
# Check that the minimize() tol= argument does something
def func(z):
x, y = z
return x**2*y**2 + x**4 + 1
def dfunc(z):
x, y = z
return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])
for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
'newton-cg', 'l-bfgs-b', 'tnc',
'cobyla', 'slsqp']:
if method in ('nelder-mead', 'powell', 'cobyla'):
jac = None
else:
jac = dfunc
sol1 = optimize.minimize(func, [1, 1], jac=jac, tol=1e-10,
method=method)
sol2 = optimize.minimize(func, [1, 1], jac=jac, tol=1.0,
method=method)
assert_(func(sol1.x) < func(sol2.x),
"%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x)))
@pytest.mark.parametrize('method', ['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs',
'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc',
'fmin_slsqp',
'Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'L-BFGS-B',
'TNC', 'SLSQP', 'trust-constr', 'dogleg', 'trust-ncg',
'trust-exact', 'trust-krylov'])
def test_minimize_callback_copies_array(self, method):
        # Check that arrays passed to callbacks are not modified
        # in place by the optimizer afterward
if method in ('fmin_tnc', 'fmin_l_bfgs_b'):
func = lambda x: (optimize.rosen(x), optimize.rosen_der(x))
else:
func = optimize.rosen
jac = optimize.rosen_der
hess = optimize.rosen_hess
x0 = np.zeros(10)
# Set options
kwargs = {}
if method.startswith('fmin'):
routine = getattr(optimize, method)
if method == 'fmin_slsqp':
kwargs['iter'] = 5
elif method == 'fmin_tnc':
kwargs['maxfun'] = 100
else:
kwargs['maxiter'] = 5
else:
def routine(*a, **kw):
kw['method'] = method
return optimize.minimize(*a, **kw)
if method == 'TNC':
kwargs['options'] = dict(maxiter=100)
else:
kwargs['options'] = dict(maxiter=5)
if method in ('fmin_ncg',):
kwargs['fprime'] = jac
elif method in ('Newton-CG',):
kwargs['jac'] = jac
elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
'trust-constr'):
kwargs['jac'] = jac
kwargs['hess'] = hess
# Run with callback
results = []
def callback(x, *args, **kwargs):
results.append((x, np.copy(x)))
sol = routine(func, x0, callback=callback, **kwargs)
# Check returned arrays coincide with their copies and have no memory overlap
assert_(len(results) > 2)
assert_(all(np.all(x == y) for x, y in results))
assert_(not any(np.may_share_memory(x[0], y[0]) for x, y in itertools.combinations(results, 2)))
@pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
'l-bfgs-b', 'tnc', 'cobyla', 'slsqp'])
def test_no_increase(self, method):
# Check that the solver doesn't return a value worse than the
# initial point.
def func(x):
return (x - 1)**2
def bad_grad(x):
# purposefully invalid gradient function, simulates a case
# where line searches start failing
return 2*(x - 1) * (-1) - 2
x0 = np.array([2.0])
f0 = func(x0)
jac = bad_grad
if method in ['nelder-mead', 'powell', 'cobyla']:
jac = None
sol = optimize.minimize(func, x0, jac=jac, method=method,
options=dict(maxiter=20))
assert_equal(func(sol.x), sol.fun)
if method == 'slsqp':
pytest.xfail("SLSQP returns slightly worse")
assert_(func(sol.x) <= f0)
def test_slsqp_respect_bounds(self):
# Regression test for gh-3108
def f(x):
return sum((x - np.array([1., 2., 3., 4.]))**2)
def cons(x):
a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])
x0 = np.array([0.5, 1., 1.5, 2.])
res = optimize.minimize(f, x0, method='slsqp',
constraints={'type': 'ineq', 'fun': cons})
assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)
def test_minimize_automethod(self):
def f(x):
return x**2
def cons(x):
return x - 2
x0 = np.array([10.])
sol_0 = optimize.minimize(f, x0)
sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}])
sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
sol_3 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(5, 10)])
sol_4 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(1, 10)])
for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
assert_(sol.success)
assert_allclose(sol_0.x, 0, atol=1e-7)
assert_allclose(sol_1.x, 2, atol=1e-7)
assert_allclose(sol_2.x, 5, atol=1e-7)
assert_allclose(sol_3.x, 5, atol=1e-7)
assert_allclose(sol_4.x, 2, atol=1e-7)
def test_minimize_coerce_args_param(self):
# Regression test for gh-3503
def Y(x, c):
return np.sum((x-c)**2)
def dY_dx(x, c=None):
return 2*(x-c)
c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])
xinit = np.random.randn(len(c))
optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS")
def test_initial_step_scaling(self):
# Check that optimizer initial step is not huge even if the
# function and gradients are
scales = [1e-50, 1, 1e50]
methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG']
def f(x):
if first_step_size[0] is None and x[0] != x0[0]:
first_step_size[0] = abs(x[0] - x0[0])
if abs(x).max() > 1e4:
raise AssertionError("Optimization stepped far away!")
return scale*(x[0] - 1)**2
def g(x):
return np.array([scale*(x[0] - 1)])
for scale, method in itertools.product(scales, methods):
if method in ('CG', 'BFGS'):
options = dict(gtol=scale*1e-8)
else:
options = dict()
if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'):
# XXX: return initial point if they see small gradient
continue
x0 = [-1.0]
first_step_size = [None]
res = optimize.minimize(f, x0, jac=g, method=method,
options=options)
err_msg = "{0} {1}: {2}: {3}".format(method, scale, first_step_size,
res)
assert_(res.success, err_msg)
assert_allclose(res.x, [1.0], err_msg=err_msg)
assert_(res.nit <= 3, err_msg)
if scale > 1e-10:
if method in ('CG', 'BFGS'):
assert_allclose(first_step_size[0], 1.01, err_msg=err_msg)
else:
# Newton-CG and L-BFGS-B use different logic for the first step,
# but are both scaling invariant with step sizes ~ 1
assert_(first_step_size[0] > 0.5 and first_step_size[0] < 3,
err_msg)
else:
# step size has upper bound of ||grad||, so line
# search makes many small steps
pass
class TestLBFGSBBounds(object):
def setup_method(self):
self.bounds = ((1, None), (None, None))
self.solution = (1, 0)
def fun(self, x, p=2.0):
return 1.0 / p * (x[0]**p + x[1]**p)
def jac(self, x, p=2.0):
return x**(p - 1)
def fj(self, x, p=2.0):
return self.fun(x, p), self.jac(x, p)
def test_l_bfgs_b_bounds(self):
x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1],
fprime=self.jac,
bounds=self.bounds)
assert_(d['warnflag'] == 0, d['task'])
assert_allclose(x, self.solution, atol=1e-6)
def test_l_bfgs_b_funjac(self):
# L-BFGS-B with fun and jac combined and extra arguments
x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),
bounds=self.bounds)
assert_(d['warnflag'] == 0, d['task'])
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_l_bfgs_b_bounds(self):
# Minimize with method='L-BFGS-B' with bounds
res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
jac=self.jac, bounds=self.bounds)
assert_(res['success'], res['message'])
assert_allclose(res.x, self.solution, atol=1e-6)
class TestOptimizeScalar(object):
def setup_method(self):
self.solution = 1.5
def fun(self, x, a=1.5):
"""Objective function"""
return (x - a)**2 - 0.8
def test_brent(self):
x = optimize.brent(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack=(-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack=(-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
def test_golden(self):
x = optimize.golden(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, brack=(-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.golden(self.fun, brack=(-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, tol=0)
assert_allclose(x, self.solution)
maxiter_test_cases = [0, 1, 5]
for maxiter in maxiter_test_cases:
x0 = optimize.golden(self.fun, maxiter=0, full_output=True)
x = optimize.golden(self.fun, maxiter=maxiter, full_output=True)
nfev0, nfev = x0[2], x[2]
assert_equal(nfev - nfev0, maxiter)
def test_fminbound(self):
x = optimize.fminbound(self.fun, 0, 1)
assert_allclose(x, 1, atol=1e-4)
x = optimize.fminbound(self.fun, 1, 5)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)
def test_fminbound_scalar(self):
with pytest.raises(ValueError, match='.*must be scalar.*'):
optimize.fminbound(self.fun, np.zeros((1, 2)), 1)
x = optimize.fminbound(self.fun, 1, np.array(5))
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_scalar(self):
# combine all tests above for the minimize_scalar wrapper
x = optimize.minimize_scalar(self.fun).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='Brent')
assert_(x.success)
x = optimize.minimize_scalar(self.fun, method='Brent',
options=dict(maxiter=3))
assert_(not x.success)
x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='Brent',
args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='golden',
args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
method='Bounded').x
assert_allclose(x, 1, atol=1e-4)
x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
np.array([5])),
args=(np.array([1.5]), ),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
bounds=(5, 1), method='bounded', args=(1.5, ))
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
bounds=(np.zeros(2), 1), method='bounded', args=(1.5, ))
x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_scalar_custom(self):
# This function comes from the documentation example.
def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
maxiter=100, callback=None, **options):
bestx = (bracket[1] + bracket[0]) / 2.0
besty = fun(bestx)
funcalls = 1
niter = 0
improved = True
stop = False
while improved and not stop and niter < maxiter:
improved = False
niter += 1
for testx in [bestx - stepsize, bestx + stepsize]:
testy = fun(testx, *args)
funcalls += 1
if testy < besty:
besty = testy
bestx = testx
improved = True
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
stop = True
break
return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
nfev=funcalls, success=(niter > 1))
res = optimize.minimize_scalar(self.fun, bracket=(0, 4), method=custmin,
options=dict(stepsize=0.05))
assert_allclose(res.x, self.solution, atol=1e-6)
def test_minimize_scalar_coerce_args_param(self):
# Regression test for gh-3503
optimize.minimize_scalar(self.fun, args=1.5)
def test_brent_negative_tolerance():
assert_raises(ValueError, optimize.brent, np.cos, tol=-.01)
class TestNewtonCg(object):
def test_rosenbrock(self):
x0 = np.array([-1.2, 1.0])
sol = optimize.minimize(optimize.rosen, x0,
jac=optimize.rosen_der,
hess=optimize.rosen_hess,
tol=1e-5,
method='Newton-CG')
assert_(sol.success, sol.message)
assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)
def test_himmelblau(self):
x0 = np.array(himmelblau_x0)
sol = optimize.minimize(himmelblau,
x0,
jac=himmelblau_grad,
hess=himmelblau_hess,
method='Newton-CG',
tol=1e-6)
assert_(sol.success, sol.message)
assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4)
assert_allclose(sol.fun, himmelblau_min, atol=1e-4)
class TestRosen(object):
def test_hess(self):
# Compare rosen_hess(x) times p with rosen_hess_prod(x,p). See gh-1775
x = np.array([3, 4, 5])
p = np.array([2, 2, 2])
hp = optimize.rosen_hess_prod(x, p)
dothp = np.dot(optimize.rosen_hess(x), p)
assert_equal(hp, dothp)
def himmelblau(p):
"""
R^2 -> R^1 test function for optimization. The function has four local
minima where himmelblau(xopt) == 0.
"""
x, y = p
a = x*x + y - 11
b = x + y*y - 7
return a*a + b*b
def himmelblau_grad(p):
x, y = p
return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14,
2*x**2 + 4*x*y + 4*y**3 - 26*y - 22])
def himmelblau_hess(p):
x, y = p
return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y],
[4*x + 4*y, 4*x + 12*y**2 - 26]])
himmelblau_x0 = [-0.27, -0.9]
himmelblau_xopt = [3, 2]
himmelblau_min = 0.0
def test_minimize_multiple_constraints():
# Regression test for gh-4240.
def func(x):
return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])
def func1(x):
return np.array([x[1]])
def func2(x):
return np.array([x[2]])
cons = ({'type': 'ineq', 'fun': func},
{'type': 'ineq', 'fun': func1},
{'type': 'ineq', 'fun': func2})
f = lambda x: -1 * (x[0] + x[1] + x[2])
res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons)
assert_allclose(res.x, [125, 0, 0], atol=1e-10)
class TestOptimizeResultAttributes(object):
# Test that all minimizers return an OptimizeResult containing
# all the OptimizeResult attributes
def setup_method(self):
self.x0 = [5, 5]
self.func = optimize.rosen
self.jac = optimize.rosen_der
self.hess = optimize.rosen_hess
self.hessp = optimize.rosen_hess_prod
self.bounds = [(0., 10.), (0., 10.)]
def test_attributes_present(self):
methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG',
'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP', 'dogleg',
'trust-ncg']
attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun',
'message']
skip = {'COBYLA': ['nit']}
for method in methods:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"Method .+ does not use (gradient|Hessian.*) information")
res = optimize.minimize(self.func, self.x0, method=method,
jac=self.jac, hess=self.hess,
hessp=self.hessp)
for attribute in attributes:
if method in skip and attribute in skip[method]:
continue
assert_(hasattr(res, attribute))
assert_(attribute in dir(res))
class TestBrute:
# Test the "brute force" method
def setup_method(self):
self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
self.solution = np.array([-1.05665192, 1.80834843])
def f1(self, z, *params):
x, y = z
a, b, c, d, e, f, g, h, i, j, k, l, scale = params
return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
def f2(self, z, *params):
x, y = z
a, b, c, d, e, f, g, h, i, j, k, l, scale = params
return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
def f3(self, z, *params):
x, y = z
a, b, c, d, e, f, g, h, i, j, k, l, scale = params
return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
def func(self, z, *params):
return self.f1(z, *params) + self.f2(z, *params) + self.f3(z, *params)
def test_brute(self):
# test fmin
resbrute = optimize.brute(self.func, self.rranges, args=self.params,
full_output=True, finish=optimize.fmin)
assert_allclose(resbrute[0], self.solution, atol=1e-3)
assert_allclose(resbrute[1], self.func(self.solution, *self.params),
atol=1e-3)
# test minimize
resbrute = optimize.brute(self.func, self.rranges, args=self.params,
full_output=True,
finish=optimize.minimize)
assert_allclose(resbrute[0], self.solution, atol=1e-3)
assert_allclose(resbrute[1], self.func(self.solution, *self.params),
atol=1e-3)
def test_1D(self):
# test that for a 1D problem the test function is passed an array,
# not a scalar.
def f(x):
assert_(len(x.shape) == 1)
assert_(x.shape[0] == 1)
return x ** 2
optimize.brute(f, [(-1, 1)], Ns=3, finish=None)
class TestIterationLimits(object):
    # Tests that optimisation does not give up before trying the requested
    # number of iterations or evaluations, and that it does not succeed
    # by exceeding the limits.
def setup_method(self):
self.funcalls = 0
def slow_func(self, v):
self.funcalls += 1
r,t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0],v[1])
return np.sin(r*20 + t)+r*0.5
def test_neldermead_limit(self):
self.check_limits("Nelder-Mead", 200)
def test_powell_limit(self):
self.check_limits("powell", 1000)
def check_limits(self, method, default_iters):
for start_v in [[0.1,0.1], [1,1], [2,2]]:
for mfev in [50, 500, 5000]:
self.funcalls = 0
res = optimize.minimize(self.slow_func, start_v,
method=method, options={"maxfev":mfev})
assert_(self.funcalls == res["nfev"])
if res["success"]:
assert_(res["nfev"] < mfev)
else:
assert_(res["nfev"] >= mfev)
for mit in [50, 500,5000]:
res = optimize.minimize(self.slow_func, start_v,
method=method, options={"maxiter":mit})
if res["success"]:
assert_(res["nit"] <= mit)
else:
assert_(res["nit"] >= mit)
for mfev,mit in [[50,50], [5000,5000],[5000,np.inf]]:
self.funcalls = 0
res = optimize.minimize(self.slow_func, start_v,
method=method, options={"maxiter":mit, "maxfev":mfev})
assert_(self.funcalls == res["nfev"])
if res["success"]:
assert_(res["nfev"] < mfev and res["nit"] <= mit)
else:
assert_(res["nfev"] >= mfev or res["nit"] >= mit)
for mfev,mit in [[np.inf,None], [None,np.inf]]:
self.funcalls = 0
res = optimize.minimize(self.slow_func, start_v,
method=method, options={"maxiter":mit, "maxfev":mfev})
assert_(self.funcalls == res["nfev"])
if res["success"]:
if mfev is None:
assert_(res["nfev"] < default_iters*2)
else:
assert_(res["nit"] <= default_iters*2)
else:
assert_(res["nfev"] >= default_iters*2 or
res["nit"] >= default_iters*2)
| python | 53,337 |
#Write a python program that prints:
#the sum of two floating point numbers
#the difference between two integers
#the product of a floating point number and an integer
#in each case, print the data type of the answer
#main function
def main():
#initialize variables
float_one = 38.0
float_two = 22.0
    #set variable to result (named total to avoid shadowing the built-in sum())
    total = float_one + float_two
    #print result along with data type
    print("Sum of floats:", total, "type:", type(total))
integer_one = 41
integer_two = 19
diff = integer_one - integer_two
print("Difference of integers:", diff, "type:", type(diff))
product = float_one * integer_two
print("Product of float and integer:", product, "type:", type(product))
if __name__ == '__main__':
    main()
| python | 739 |
dados = dict()
time = list()
qtd_gols = list()
def l(): print('\n'+'=-'*25+'=')
l()
while True:
dados.clear()
    dados['nome'] = input('player name: ')
    qtd_partida = int(input('how many matches: '))
qtd_gols.clear()
for i in range(qtd_partida):
        qtd_gols.append(int(input(f'how many goals in match {1+i}: ')))
dados['gols'] = qtd_gols[:]
dados['total'] = sum(qtd_gols)
time.append(dados.copy())
while True:
        sair = str(input('type s to stop or n to continue: S/N ').lower().strip()[0])
        if sair in 'sn':
            break
        print('"error", answer only s to stop or n to continue')
if sair == 's':
break
l()
print('data  ', end='')
for i in dados.keys():
print(f'{(i+":"):<15}', end='')
l()
for key, v in enumerate(time):
print(f'{key:>3} ', end='')
for i in v.values():
print(f' {str(i):<15}', end='')
print()
l()
while True:
l()
    busca = int(input('(999 to stop) show data for which player: '))
if busca == 999:
break
if busca >= len(time):
        print('error! there is no player with that search code')
else:
        print(f' -- summary for player {time[busca]["nome"]}:')
        for i, g in enumerate(time[busca]['gols']):
            print(f' in game {i+1} scored {g} goals')
l()
print('come back anytime')
dados = dict()
time = list()
qtd_gols = list()
def l(): print('\n'+'=-'*25+'=')
while True:
    dados.clear()
    dados['nome'] = input('player name: ')
    qtd_partida = int(input('how many matches: '))
qtd_gols.clear()
for i in range(qtd_partida):
        qtd_gols.append(int(input(f'how many goals in match {1+i}: ')))
dados['gols'] = qtd_gols[:]
dados['total'] = sum(qtd_gols)
time.append(dados.copy())
while True:
        sair = str(input('type s to stop or n to continue: S/N ').lower().strip()[0])
        if sair in 'sn':
            break
        print('"error", answer only s to stop or n to continue')
if sair == 's':
break
l()
print('data  ', end='')
for i in dados.keys():
print(f'{i:<15}', end='')
l()
for key, v in enumerate(time):
print(f'{key:>3} ', end='')
for i in v.values():
print(f' {str(i):<15}', end='')
print()
l()
| python | 2,305 |
# flake8: noqa
# Native libraries
import logging
import sys
from logging.config import fileConfig
from alembic import context
from alembic.autogenerate import rewriter
from alembic.operations import ops
from sqlalchemy import Column, engine_from_config, pool
import loggers
from config import ALEMBIC_CONFIG
from db import db
from db.models import * # noqa
sys.path.extend(["./"])
# To include a model in migrations, add a line here.
###############################################################################
config = context.config
config.set_main_option("sqlalchemy.url", str(ALEMBIC_CONFIG.url))
exclude_tables = config.get_section("alembic:exclude").get("tables", "").split(",")
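# Illustrative alembic.ini fragment consumed by the lookup above; the section
# and key names come from the code, while the table names are hypothetical:
#
#   [alembic:exclude]
#   tables = spatial_ref_sys,alembic_version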
fileConfig(config.config_file_name)
target_metadata = db
loggers.config(20, formatter="layman")
logger = logging.getLogger(__name__)
class CustomRewriter(rewriter.Rewriter):
""" Extends self.process_revision_directives since a standalone
process_revision_directives function and a rewriter cant both
be passed to the MigrationContext at the same time."""
def process_revision_directives(self, context, revision, directives):
if config.cmd_opts.autogenerate:
script = directives[0]
            # Don't generate a new migration file if there are no pending operations
if script.upgrade_ops.is_empty():
directives[:] = []
logger.warning(
"No pending operations. Skipping creating an empty revision file."
)
else:
# generate the new migration using the rewriter
super().process_revision_directives(context, revision, directives)
writer = CustomRewriter()
@writer.rewrites(ops.CreateTableOp)
def order_columns(context, revision, op):
""" Enforce id to be the first column of the table, as well as forcing
created_at and updated_at to be the last columns"""
special_names = {"id": -100, "created_at": 1001, "updated_at": 1002}
cols_by_key = [
(
special_names.get(col.key, index) if isinstance(col, Column) else 2000,
col.copy(),
)
for index, col in enumerate(op.columns)
]
columns = [col for idx, col in sorted(cols_by_key, key=lambda entry: entry[0])]
return ops.CreateTableOp(op.table_name, columns, schema=op.schema, **op.kw)
def include_object(object, name, type_, reflected, compare_to): # nocover
if type_ == "table" and name in exclude_tables:
return False
else:
return True
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=ALEMBIC_CONFIG.url.__to_string__(hide_password=False),
target_metadata=target_metadata,
literal_binds=True,
include_object=include_object,
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
{"sqlalchemy.url": ALEMBIC_CONFIG.url.__to_string__(hide_password=False)},
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
include_object=include_object,
transaction_per_migration=True,
process_revision_directives=writer,
)
with context.begin_transaction():
context.execute("SET search_path TO public")
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| python | 4,105 |
from django.contrib.auth.models import User
from django import forms
class UserForm(forms.ModelForm):
    password = forms.CharField(widget=forms.PasswordInput)
    class Meta:
        model = User
        fields = ['username', 'email', 'password']
class change_MoneyForm(forms.Form):
new_amount = forms.IntegerField()
Loan_choice = [
('normal_loan','Normal Loan'),
('emergency_loan', 'Emergency Loan'),
]
class LoanReqForm(forms.Form):
loanChoice = forms.CharField(widget=forms.RadioSelect(choices=Loan_choice))
loan_amount = forms.IntegerField()
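# Sketch (assumption, not part of the original module): how a view might save
# UserForm so Django hashes the password instead of storing it as plain text;
# assumes form.is_valid() has already been called.
def register_user(form: UserForm):
    user = form.save(commit=False)
    user.set_password(form.cleaned_data['password'])
    user.save()
    return user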
| python | 563 |
# coding=utf-8
import numpy as np
from sklearn import preprocessing
if __name__ == '__main__':
a = np.array([
[13, 2, 4, 41, 27],
[12, 4, 3, 46, 21],
[10, 5, 4, 29, 20],
[10, 4, 4, 31, 18],
[9, 4, 6, 27, 24],
[8, 6, 5, 36, 29],
[8, 5, 6, 31, 25],
[7, 6, 6, 27, 25],
[6, 5, 7, 35, 29],
[6, 4, 9, 24, 29],
[5, 6, 7, 20, 26],
[4, 7, 8, 20, 27],
[3, 8, 8, 22, 28],
[4, 4, 11, 21, 43],
[3, 6, 9, 12, 26],
[2, 4, 13, 14, 39]]
)
min_max_scaler = preprocessing.MinMaxScaler()
# print(a)
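    # MinMaxScaler rescales each column to [0, 1]:
    #   X_scaled = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    # Equivalent NumPy check (illustrative, same result):
    #   (a - a.min(axis=0)) / (a.max(axis=0) - a.min(axis=0))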
    print(min_max_scaler.fit_transform(a))
| python | 668 |
'''
MIT License
Name cs225sp20_env Python Package
URL https://github.com/Xiwei-Wang/cs225sp20_env
Version 1.0
Creation Date 26 April 2020
Copyright(c) 2020 Instructors, TAs and Some Students of UIUC CS 225 SP20 ZJUI Course
Instructors: Prof. Dr. Klaus-Dieter Schewe
TAs: Tingou Liang, Run Zhang, Enyi Jiang, Xiang Li
Group 1 Students: Shen Zheng, Haozhe Chen, Ruiqi Li, Xiwei Wang
Other Students: Zhongbo Zhu
Above all, due to academic integrity, students who take the UIUC CS 225 ZJUI Course
taught with Python later than the Spring 2020 semester are NOT authorized to access
this package.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---------
File cs225sp20_env/Graph/WDiGraph.py
Version 1.0
'''
# %%
# for VS Code users
if __name__ != "cs225sp20_env.Graph.WDiGraph":
import sys
sys.path.append(__file__[:-len("cs225sp20_env/Graph/WDiGraph.py")])
# %%
# for PyCharm users
if __name__ != "cs225sp20_env.Graph.WDiGraph":
import sys
import os
sys.path.append(os.getcwd())
# %%
from cs225sp20_env.Graph.VertexList import VertexList
from cs225sp20_env.Graph.WEdgeList import WEdgeList
from cs225sp20_env.List.PyList import PyList
from cs225sp20_env.List.Fifo import Fifo
# %%
class WDiGraph:
def __init__(self, edges=[]):
self.vertexList = VertexList(edges)
for e in edges:
self.addEdge(e)
#self.addEdge((e[1], e[0], e[2]))
def addEdge(self, edge):
vertex = self.vertexList.locate(edge[0])
edgelist = vertex.edges
if edgelist != None:
edgelist.add(edge[1], edge[2])
else:
edgelist = WEdgeList(edge[1],edge[2])
vertex.setEdges(edgelist)
def __iter__(self):
vertices = self.vertexList
for v in vertices:
x = vertices.locate(v)
y = x.edges
if y != None:
for z in y:
yield (v, z[0], z[1])
def insertVertex(self, item):
if not (item in self.vertexList):
self.vertexList.append(item)
def deleteVertex(self, item):
return self.vertexList.remove(item)
def insertEdge(self, edge):
self.vertexList.addVertex(edge)
self.addEdge(edge)
# self.addEdge((edge[1], edge[0], edge[2]))
def deleteEdge(self, edge):
self.__deleteEdge(edge)
# self.__deleteEdge((edge[1], edge[0], edge[2]))
def __deleteEdge(self, edge):
if not (edge[0] in self.vertexList):
print("There is no edge", edge)
return False
vertexlocation = self.vertexList.locate(edge[0])
edgelist = vertexlocation.getEdges()
if edgelist == None:
print("There is no edge", edge)
return False
res = edgelist.remove(edge[1])
if res == False:
print("There is no edge", edge)
return res
def outgoingEdges(self, item):
vertex = self.vertexList.locate(item)
if vertex == None:
print("There is no vertex", item)
return []
edgelist = vertex.getEdges()
if edgelist == None:
return []
res = []
for v in edgelist:
res.append((item, v[0], v[1]))
return res
        # yield (item, v[0], v[1])  # Replacing the two lines above with this line makes this method work as an iterator.
def bfs_KD(self, vertex):
if not (vertex in self.vertexList):
print("There is no vertex", vertex)
return None
length = self.vertexList.getlength()
distance = [None] * length
parent = [None] * length
index = self.vertexList.index(vertex)
distance[index] = 0
parent[index] = vertex
currentlayer = Fifo(length)
currentlayer.pushback(vertex)
nextlayer = Fifo(length)
for l in range(length):
for u in currentlayer:
# print(u)
loc = self.vertexList.locate(u)
edgelist = loc.getEdges()
if edgelist != None:
for v in edgelist:
idx = self.vertexList.index(v[0])
if parent[idx] == None:
nextlayer.pushback(v[0])
distance[idx] = l + 1
parent[idx] = u
currentlayer = nextlayer
nextlayer = Fifo(length)
return (distance, parent)
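    # Note: bfs below is an index-based variant used by allBFS; it writes into
    # self.distance/self.parent arrays that are index-aligned with vertexList,
    # so traversals of separate components share one pair of result arrays.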
def bfs(self, vertex, index):
if not (vertex in self.vertexList):
print("There is no vertex", vertex)
return None
length = self.vertexList.getlength()
self.distance[index] = 0
self.parent[index] = vertex
queue = []
queue.append(vertex)
head = 0 # head index of queue
while head < len(queue):
u = queue[head]
index = self.vertexList.index(u)
cur_distance = self.distance[index]
loc = self.vertexList.locate(u)
edgelist = loc.getEdges()
if edgelist != None:
for v in edgelist:
idx = self.vertexList.index(v[0])
if self.parent[idx] == None:
queue.append(v[0])
self.distance[idx] = cur_distance + 1
self.parent[idx] = u
else:
                        # TODO: handle meeting a vertex already assigned to another subset
pass
head += 1
def allBFS(self):
numVertices = self.vertexList.getlength()
self.distance = [None] * numVertices
self.parent = [None] * numVertices
for s in self.vertexList:
idx = self.vertexList.index(s)
if self.distance[idx] == None:
self.bfs(s, idx)
return (self.distance, self.parent)
#DFS traverse using recursion
def allDFS(self):
numVertices = self.vertexList.getlength()
initlist = [None] * numVertices
self.tree = PyList(initlist, numVertices)
for i in range(numVertices):
newgraph = WDiGraph([])
self.tree[i] = newgraph
self.mark = [None] * numVertices
self.dfsPos = 1
self.dfsNum = [1] * numVertices
self.finishingTime = 1
self.finishTime = [1] * numVertices
for s in self.vertexList:
idx = self.vertexList.index(s)
if self.mark[idx] == None:
self.mark[idx] = s
self.dfsNum[idx] = self.dfsPos
self.dfsPos += 1
self.dfs(s, idx)
def dfs(self, vertex, index):
for e in self.outgoingEdges(vertex):
idx = self.vertexList.index(e[1])
if self.mark[idx] == None:
self.tree[index].insertEdge(e)
self.__traverseTreeEdge(e)
self.mark[idx] = e[1]
self.dfs(e[1], index)
self.backtrack(vertex)
def __traverseTreeEdge(self, e):
idx = self.vertexList.index(e[1])
self.dfsNum[idx] = self.dfsPos
self.dfsPos += 1
def backtrack(self, vertex):
idx = self.vertexList.index(vertex)
self.finishTime[idx] = self.finishingTime
self.finishingTime += 1
# %%
if __name__ == "__main__":
edges = [(1, 2, 0.1), (2, 4, 0.2), (3, 5, 0.3), (2, 5, 0.4),
(1, 5, 0.5), (3, 4, 0.6), (3, 1, 0.7), (6, 2, 0.8), (6, 3, 0.9)]
g = WDiGraph(edges)
print(g.outgoingEdges(1))
print([v for v in g.vertexList])
g.insertVertex(7)
g.insertVertex(8)
print([v for v in g.vertexList])
g.deleteVertex(1)
g.deleteVertex(7)
print([v for v in g.vertexList])
print([e for e in g])
g.insertEdge((1, 7, 1.1))
print([e for e in g])
g.deleteEdge((1, 2, 1.2))
print([e for e in g])
# you can install this package on your own environment to help understand
import networkx as nx
import matplotlib.pyplot as plt
# visualization
G = nx.Graph()
G.add_weighted_edges_from(edges)
print("Print all vertices:{}".format(G.nodes()))
print("Print all edges:{}".format(G.edges()))
print("Print the number of edges:{}".format(G.number_of_edges()))
nx.draw_networkx(G)
plt.show()
graph = WDiGraph(edges)
graph.allDFS()
for s in graph.vertexList:
idx = graph.vertexList.index(s)
print(s, ':', [e for e in graph.tree[idx]])
graph = WDiGraph([(1, 2, 0.1), (2, 4, 0.2), (3, 5, 0.3), (2, 5, 0.4), (1, 5, 0.5), (3, 4, 0.6), (3, 1, 0.7), (6, 2, 0.8), (6, 3, 0.9),
(61, 65, 1.1), (63, 64, 1.2), (63, 66, 1.3), (62, 64, 1.4), (62, 66, 1.5)])
distance, parent = graph.bfs_KD(1)
print("distance: \t%s\nparent: \t%s" % (distance, parent))
distance, parent = graph.allBFS()
print("distance: \t%s\nparent: \t%s" % (distance, parent))
    def graph_generating(n):
        """Build a complete digraph on n vertices with random positive
        edge weights (clamped below at 0.1)."""
        import numpy as np
        graph = WDiGraph()
        for i in range(n):
            for j in range(i):
                graph.insertEdge((i, j, max(np.random.randn(), 0.1)))
        print([e for e in graph])
        return graph
    g = graph_generating(8)
| python | 10,169 |
#!/usr/bin/env python
#
# A minimal Python language binding for the OpsRamp REST API.
#
# base.py
# Containing various base classes used in other parts of the library
# but not intended for direct use by callers.
#
# (c) Copyright 2019-2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import requests
try:
# Python 3
from urllib import parse as urlparse
from simplejson.errors import JSONDecodeError
except ImportError:
# Python 2
import urlparse
JSONDecodeError = ValueError
class Helpers(object):
# (DW) Add support for retries of requests to the OpsRamp API in the event
# of receiving a HTTP 429 (Too Many Requests) response from the API to
# suggest that it has activated rate limiting. This implements progressive
# backoff (i.e. gradually increasing the delay between attempts) until the
# maximum number of retries is reached.
# By wrapping this retry handler around the session instance being used
# inside instances of the ApiWrapper class the effect of this should be
# more or less transparent to the rest of the code; at least until rate
# limiting kicks in, at which point it will hopefully slow down but still
# "get there" so to speak, and seems to be non-"hacky".
# The defaults *should* be sensible.
# Note: if the status_forcelist tuple only has one value (e.g 429) then a
# trailing comma is REQUIRED for Python to interpret it correctly as a
# tuple: type((429)) is 'int', whereas type((429,)) is 'tuple'.
# Borrowed from:
# https://www.peterbe.com/plog/best-practice-with-retries-with-requests
retryclass = requests.packages.urllib3.util.Retry
@staticmethod
def session_add_retry_handler(session=None):
        # urllib3 does not retry on POST by default, but we want to retry iff
        # the return status is 429 rate limiting, on the assumption that this
        # means the POST did not happen and is therefore safe to retry.
try:
http_verbs = set(Helpers.retryclass.DEFAULT_ALLOWED_METHODS)
except AttributeError:
# it has a different name in older versions of urllib3
http_verbs = set(Helpers.retryclass.DEFAULT_METHOD_WHITELIST)
http_verbs.add('POST')
retry = Helpers.create_retry_handler(
retries=7,
backoff_factor=0.5,
status_forcelist=(429,),
allowed_methods=http_verbs
)
adapter = requests.adapters.HTTPAdapter(max_retries=retry)
session = session or requests.Session()
session.mount(prefix='http://', adapter=adapter)
session.mount(prefix='https://', adapter=adapter)
return session
@staticmethod
def create_retry_handler(retries, backoff_factor, status_forcelist,
allowed_methods):
assert isinstance(retries, int)
assert retries >= 0
assert backoff_factor >= 0
assert isinstance(status_forcelist, tuple)
return Helpers.retryclass(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist
)
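# Illustrative usage (added note, not part of the original module): wrap a
# requests.Session so HTTP 429 responses are retried with backoff, e.g.
#   session = Helpers.session_add_retry_handler()
#   session.get('https://api.example.com/...')  # hypothetical URL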
class PathTracker(object):
def __init__(self):
self.reset()
def reset(self):
self.prefix = ''
self.stack = []
def __str__(self):
return '%s "%s" %s' % (str(type(self)), self.prefix, self.stack)
def clone(self):
new1 = PathTracker()
new1.prefix = self.prefix
new1.stack = self.stack
return new1
def cd(self, path=None):
path = str(path or '/')
# no support for '..' right now, maybe in the future
if path[0] == '/':
self.prefix = path
else:
self.prefix += '/' + path
self.prefix = self.prefix.strip('/')
return self.prefix
def pushd(self, path=None):
self.stack.append(self.prefix)
return self.cd(path)
def popd(self):
self.prefix = self.stack.pop()
return self.prefix
def fullpath(self, suffix=None):
suffix = str(suffix or '')
if len(suffix) > 0 and suffix[0] == '/':
retval = suffix
else:
retval = ''
if len(self.prefix) > 0:
retval += '/' + self.prefix
if len(suffix) > 0:
retval += '/' + suffix
return retval
class ApiObject(object):
def __init__(self, url, auth, tracker=None, session=None):
self.baseurl = url.rstrip('/')
self.auth = auth
if tracker:
self.tracker = tracker
else:
self.tracker = PathTracker()
self.session = Helpers.session_add_retry_handler(session=session)
def __str__(self):
return '%s "%s" "%s"' % (
str(type(self)), self.baseurl, self.tracker.fullpath()
)
def clone(self):
new1 = ApiObject(
self.baseurl,
self.auth,
self.tracker.clone(),
self.session
)
return new1
def cd(self, path=None):
self.tracker.cd(path)
return self.compute_url()
def pushd(self, path=None):
self.tracker.pushd(path)
return self.compute_url()
def popd(self):
self.tracker.popd()
return self.compute_url()
def chroot(self, suffix=''):
suffix = self.tracker.fullpath(suffix)
if suffix:
self.baseurl += suffix
self.tracker.reset()
return self.compute_url()
def collate_pages(self, get_request, data):
"""Given a GET request whose results span across multiple pages, crawl
each page and collate the results.
:param first_page_data: "results" dict for first pageful of data
:type first_page_data: dict
:param request: Request used to get first page
:type request: requests.PreparedRequest
"""
# First, sanity check that all is good. Only process GET requests:
if get_request.method.upper().strip() != "GET":
return data
# Only attempt to pull subsequent pages if we can verify that there are
# subsequent pages "to be pulled"...
if isinstance(data, dict) and "results" in data.keys():
collated_data = data["results"]
while "nextPage" in data.keys() and data["nextPage"]:
# Get the next page full of data.
next_page = self.session.get(
get_request.url,
params={'pageNo': int(data['pageNo']) + 1},
headers=get_request.headers
)
if not next_page.ok:
# Return an empty result.
collated_data = []
break
data = next_page.json()
collated_data = collated_data + data['results']
del next_page
# Dismantle the URL to see if data was requested in descending
# order...
query_params = dict(
urlparse.parse_qsl(urlparse.urlsplit(get_request.url).query)
)
descending_order = 'isDescendingOrder' in query_params.keys() and \
query_params['isDescendingOrder']
# Re-create the final data set as if it were a single page
# containing all records to ensure that existing stuff that expects
# this data structure doesn't fall over.
return {
'results': collated_data,
'totalResults': len(collated_data),
'pageNo': 1,
'pageSize': len(collated_data),
'nextPage': False,
'previousPageNo': 0,
'descendingOrder': descending_order
}
else:
return data
def compute_url(self, suffix=''):
retval = self.baseurl
suffix = self.tracker.fullpath(suffix)
if suffix:
retval += suffix
return retval.rstrip('/')
def prep_headers(self, headers):
if not headers:
return self.auth
hdr = {}
hdr.update(self.auth)
hdr.update(headers)
return hdr
def process_result(self, url, resp):
if resp.status_code != requests.codes.OK:
msg = '%s %s %s %s' % (
resp,
resp.request.method,
url,
resp.content
)
raise RuntimeError(msg)
try:
data = resp.json()
# Some GET requests return paginated output. If all the data fits
# in one page, return just the contents of the "results" list,
# otherwise, we need to do an assembly job to collate the entire
# list of results from all pages and return the full list.
if resp.request.method == "GET" and isinstance(data, dict) and \
data.get("nextPage", None):
return self.collate_pages(resp.request, data=data)
else:
return data
except JSONDecodeError:
return resp.text
def get(self, suffix=None, headers=None):
url = self.compute_url(suffix)
hdr = self.prep_headers(headers)
resp = self.session.get(url, headers=hdr)
return self.process_result(url, resp)
def post(self, suffix=None, headers=None, data=None, json=None):
url = self.compute_url(suffix)
hdr = self.prep_headers(headers)
resp = self.session.post(url, headers=hdr, data=data, json=json)
return self.process_result(url, resp)
def put(self, suffix=None, headers=None, data=None, json=None):
url = self.compute_url(suffix)
hdr = self.prep_headers(headers)
resp = self.session.put(url, headers=hdr, data=data, json=json)
return self.process_result(url, resp)
def delete(self, suffix=None, headers=None, data=None, json=None):
url = self.compute_url(suffix)
hdr = self.prep_headers(headers)
resp = self.session.delete(url, headers=hdr, data=data, json=json)
return self.process_result(url, resp)
def patch(self, suffix=None, headers=None, data=None, json=None):
url = self.compute_url(suffix)
hdr = self.prep_headers(headers)
resp = self.session.patch(url, headers=hdr, data=data, json=json)
return self.process_result(url, resp)
class ApiWrapper(object):
def __init__(self, apiobject, suffix=None):
self.api = apiobject.clone()
if suffix:
self.api.chroot(suffix)
def __str__(self):
return '%s %s' % (str(type(self)), self.api)
def get(self, suffix=None, headers=None):
return self.api.get(suffix, headers=headers)
def post(self, suffix=None, headers=None, data=None, json=None):
return self.api.post(suffix, headers=headers, data=data, json=json)
def put(self, suffix=None, headers=None, data=None, json=None):
return self.api.put(suffix, headers=headers, data=data, json=json)
def delete(self, suffix=None, headers=None, data=None, json=None):
return self.api.delete(suffix, headers=headers, data=data, json=json)
def patch(self, suffix=None, headers=None, data=None, json=None):
return self.api.patch(suffix, headers=headers, data=data, json=json)
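if __name__ == '__main__':
    # Minimal sketch (illustrative, not part of the original module): exercise
    # the URL bookkeeping of ApiObject without touching the network. The URL
    # below is a hypothetical placeholder.
    demo = ApiObject('https://api.example.com/v2', auth={})
    demo.pushd('tenants/client_1')
    print(demo.compute_url('resources'))
    # -> https://api.example.com/v2/tenants/client_1/resources
    demo.popd()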
| python | 12,021 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from warehouse.utils.static import ManifestCacheBuster
class TestManifestCacheBuster:
def test_returns_when_valid(self, monkeypatch):
monkeypatch.setattr(
ManifestCacheBuster,
"get_manifest",
lambda x: {"/the/path/style.css": "/the/busted/path/style.css"},
)
cb = ManifestCacheBuster("warehouse:static/dist/manifest.json")
result = cb(None, "/the/path/style.css", {"keyword": "arg"})
assert result == ("/the/busted/path/style.css", {"keyword": "arg"})
def test_raises_when_invalid(self, monkeypatch):
monkeypatch.setattr(ManifestCacheBuster, "get_manifest", lambda x: {})
cb = ManifestCacheBuster("warehouse:static/dist/manifest.json")
with pytest.raises(ValueError):
cb(None, "/the/path/style.css", {"keyword": "arg"})
def test_returns_when_invalid_and_not_strict(self, monkeypatch):
monkeypatch.setattr(ManifestCacheBuster, "get_manifest", lambda x: {})
cb = ManifestCacheBuster("warehouse:static/dist/manifest.json", strict=False)
result = cb(None, "/the/path/style.css", {"keyword": "arg"})
assert result == ("/the/path/style.css", {"keyword": "arg"})
| python | 1,775 |
import os.path
from collections import OrderedDict
from os import walk
from time import strftime
from typing import Union
from twisted.plugin import IPlugin
from zope.interface import implementer
from desertbot.message import IRCMessage
from desertbot.moduleinterface import IModule
from desertbot.modules.commandinterface import BotCommand
from desertbot.response import IRCResponse
try:
import re2
except ImportError:
import re as re2
@implementer(IPlugin, IModule)
class LogSearch(BotCommand):
def triggers(self):
return ['firstseen', 'lastseen', 'lastsaw', 'firstsaid', 'lastsaid', 'saidbeforetoday']
    def help(self, query: Union[list, None]) -> str:
command = query[0].lower()
if command in self._commands:
return self._commands[command].__doc__
else:
return f"{', '.join(self._commands.keys())} - Search the logs by nickname or (part of) a message."
def execute(self, message: IRCMessage):
if len(message.parameterList) < 1:
return IRCResponse('Search what?', message.replyTo)
return IRCResponse(self._commands[message.command](self, message), message.replyTo)
def _getLogs(self, message):
basePath = self.bot.logPath
logPath = os.path.join(basePath, self.bot.server, message.replyTo)
logs = []
for (dirpath, dirnames, filenames) in walk(logPath):
logs.extend(filenames)
break
logs.sort()
return logPath, logs
def _firstseen(self, message: IRCMessage):
"""firstseen <nick> | Search for the first line someone with the given nick spoke."""
logPath, logs = self._getLogs(message)
return self._search(message.parameters, logPath, logs, True, True, False)
def _lastseen(self, message: IRCMessage):
"""lastseen <nick> | Search for the last line someone with the given nick spoke. Includes today."""
logPath, logs = self._getLogs(message)
return self._search(message.parameters, logPath, logs, True, True, True)
def _lastsaw(self, message: IRCMessage):
"""lastsaw <nick> | Search for the last line someone with the given nick spoke. Does not include today."""
logPath, logs = self._getLogs(message)
return self._search(message.parameters, logPath, logs, True, False, True)
def _firstsaid(self, message: IRCMessage):
"""firstsaid <messagepart> | Search for the first time a given thing was said."""
logPath, logs = self._getLogs(message)
return self._search(message.parameters, logPath, logs, False, True, False)
def _lastsaid(self, message: IRCMessage):
"""lastsaid <messagepart> | Search for the last time a given thing was said."""
logPath, logs = self._getLogs(message)
return self._search(message.parameters, logPath, logs, False, True, True)
def _saidbeforetoday(self, message: IRCMessage):
"""saidbeforetoday <messagepart> | Search for the last time a given thing was said, before today."""
logPath, logs = self._getLogs(message)
return self._search(message.parameters, logPath, logs, False, False, True)
def _search(self, searchTerms, logPath, files, searchForNick, includeToday, reverse):
candidatePattern = re2.compile(searchTerms, re2.IGNORECASE)
if searchForNick:
fullPattern = re2.compile(fr"^\[[^]]+\]\s+<(.?{searchTerms})>\s+.*", re2.IGNORECASE)
else:
fullPattern = re2.compile(fr'.*<.*> .*({searchTerms}).*', re2.IGNORECASE)
found = None
today = f"{strftime('%Y-%m-%d')}.log"
if today in files and not includeToday:
files.remove(today)
if reverse:
files.reverse()
for filename in files:
with open(os.path.join(logPath, filename), 'r', errors='ignore') as logfile:
contents = logfile.read()
# We do an initial check to see if our searchTerms show up anywhere in the file.
# If they don't, we know the file contains no matches and move on.
# If they do, we move on to the more expensive line search.
if not candidatePattern.search(contents):
continue
lines = contents.rstrip().split('\n') # remove trailing newline or we end up with a blank line in the list
if reverse:
lines = reversed(lines)
if reverse and includeToday and filename == today:
lines = list(lines)[1:]
for line in lines:
if fullPattern.match(line.rstrip()):
found = line.rstrip()
break
if found:
return f'[{filename[:10]}] {found}'
return 'Nothing that matches your search terms has been found in the log.'
_commands = OrderedDict([
('firstseen', _firstseen),
('lastseen', _lastseen),
('lastsaw', _lastsaw),
('firstsaid', _firstsaid),
('lastsaid', _lastsaid),
('saidbeforetoday', _saidbeforetoday)
])
logsearch = LogSearch()
| python | 5,108 |
"""
Runs a basic test against an ORES api assuming that it supports the standard
configuration "/v2/testwiki/revid/..."
:Usage:
test_api -h | --help
test_api <ores-url> [--debug]
:Options:
-h --help Prints this documentation
<ores-url> URL of base ORES webserver
--debug Print debugging information
"""
import json
import logging
import docopt
import requests
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
# Requests is loud. Be quiet requests.
requests.packages.urllib3.disable_warnings()
ores_url = args['<ores-url>']
make_request(ores_url, "/")
make_request(ores_url, "/ui/")
make_request(ores_url, "/scores/", is_json=True)
make_request(ores_url, "/v1/spec/", is_json=True)
make_request(ores_url, "/v2/spec/", is_json=True)
make_request(ores_url, "/v3/spec/", is_json=True)
make_request(
ores_url, "/v1/scores/testwiki/revid/2342342/", is_json=True,
equal_to={"2342342": {"prediction": False,
"probability": {"false": 0.76, "true": 0.24}}})
make_request(
ores_url, "/v2/scores/testwiki/revid/2342342/", is_json=True,
equal_to={"scores": {"testwiki": {"revid": {"scores": {
"2342342": {
"prediction": False,
"probability": {"false": 0.76, "true": 0.24}
}
}, "version": "0.0.0"}}}})
make_request(
ores_url, "/v3/scores/testwiki/2342342/revid/", is_json=True,
equal_to={"testwiki": {
"models": {"revid": {"version": "0.0.0"}},
"scores": {"2342342": {
"revid": {"score": {"prediction": False,
"probability": {"false": 0.76,
"true": 0.24}}}
}}}})
response = requests.get(ores_url + "/404/")
assert response.status_code == 404, "/404/ didn't get a 404!"
make_request(
ores_url,
"/v3/scores/testwiki/2342342/revid/?features&feature.delay=16",
is_json=True,
equal_to={"testwiki": {
"models": {"revid": {"version": "0.0.0"}},
"scores": {"2342342": {
"revid": {"error": {'message': 'Timed out after 15 seconds.',
'type': 'TimeoutError'}}
}}}})
other_wiki_event = {
"comment": "/* K-O */", "database": "enwiki",
"meta": {
"domain": "en.wikipedia.org", "dt": "2017-12-05T15:56:51+00:00",
"id": "e87a7723-d9d4-11e7-9e8e-141877613bad",
"request_id": "3464552a-85d0-404e-aa24-80b74473b15f",
"schema_uri": "mediawiki/revision/create/2",
"topic": "eqiad.mediawiki.revision-create",
"uri": "https://en.wikipedia.org/wiki/List_of_Ateneo_de_Manila_University_people",
"partition": 0, "offset": 561544941
},
"page_id": 4716305, "page_is_redirect": False,
"page_namespace": 0,
"page_title": "List_of_Ateneo_de_Manila_University_people",
"parsedcomment": "<a href=\"/wiki/List_of_Ateneo_de_Manila_University_" +
"people#K-O\" title=\"List of Ateneo de Manila " +
"University people\">→</a><span dir=\"auto\">" +
"<span class=\"autocomment\">K-O</span></span>",
"performer": {
"user_edit_count": 13845,
"user_groups": ["extendedconfirmed", "*", "user", "autoconfirmed"],
"user_id": 24365224, "user_is_bot": False,
"user_registration_dt": "2015-03-09T02:40:31Z",
"user_text": "Khendygirl"
},
"rev_content_changed": True, "rev_content_format": "wikitext",
"rev_content_model": "wikitext", "rev_id": 813852458,
"rev_len": 60647, "rev_minor_edit": False,
"rev_parent_id": 813852231,
"rev_sha1": "0a6fggdfff46x0bptqle6pycuz0paht",
"rev_timestamp": "2017-12-05T15:56:51Z"
}
make_request(
ores_url,
"/v3/precache",
post_json=other_wiki_event,
http_code=204)
    own_event = dict(other_wiki_event)  # copy so other_wiki_event is not mutated
    own_event['database'] = "testwiki"
make_request(
ores_url,
"/v3/precache",
is_json=True,
post_json=own_event,
http_code=200,
equal_to={
"testwiki": {
"models": {"revid": {"version": "0.0.0"}},
"scores": {"813852458": {"revid": {"score": {
"prediction": True,
"probability": {
"false": 0.15000000000000002,
"true": 0.85
}}}}}
}})
def make_request(ores_url, path, http_code=200, is_json=False,
equal_to=None, post_json=None):
logger.debug("Requesting {0}".format(path))
if post_json is None:
response = requests.get(ores_url + path)
else:
response = requests.post(ores_url + path, json=post_json)
assert response.status_code == http_code, \
"Status code mismatch {0} for {1}: {2}".format(
response.status_code, path, response.content)
content = response.text
if is_json:
try:
content = json.loads(content)
except ValueError:
raise RuntimeError("Could not parse the following as JSON: '{0}"
.format(content[0:100]))
if equal_to:
assert content == equal_to, "{0} != {1}".format(content, equal_to)
return response
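# Entry point for the CLI described in the module docstring (assumed; the
# docopt usage text above implies the script is run directly).
if __name__ == "__main__":
    main()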
| python | 5,854 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/containerregistry/v1/repository.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/containerregistry/v1/repository.proto',
package='yandex.cloud.containerregistry.v1',
syntax='proto3',
serialized_options=b'\n%yandex.cloud.api.containerregistry.v1ZWgithub.com/yandex-cloud/go-genproto/yandex/cloud/containerregistry/v1;containerregistry',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n2yandex/cloud/containerregistry/v1/repository.proto\x12!yandex.cloud.containerregistry.v1\"&\n\nRepository\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\tB\x80\x01\n%yandex.cloud.api.containerregistry.v1ZWgithub.com/yandex-cloud/go-genproto/yandex/cloud/containerregistry/v1;containerregistryb\x06proto3'
)
_REPOSITORY = _descriptor.Descriptor(
name='Repository',
full_name='yandex.cloud.containerregistry.v1.Repository',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='yandex.cloud.containerregistry.v1.Repository.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='id', full_name='yandex.cloud.containerregistry.v1.Repository.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=89,
serialized_end=127,
)
DESCRIPTOR.message_types_by_name['Repository'] = _REPOSITORY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Repository = _reflection.GeneratedProtocolMessageType('Repository', (_message.Message,), {
'DESCRIPTOR' : _REPOSITORY,
'__module__' : 'yandex.cloud.containerregistry.v1.repository_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.containerregistry.v1.Repository)
})
_sym_db.RegisterMessage(Repository)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
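# Usage sketch (illustrative values; assumes this generated module is importable
# as repository_pb2):
#   from yandex.cloud.containerregistry.v1 import repository_pb2
#   repo = repository_pb2.Repository(name='my-registry/my-image', id='crp123')
#   payload = repo.SerializeToString()
#   parsed = repository_pb2.Repository.FromString(payload)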
| python | 3,024 |
import copy
import typing
import numpy
import numbers
from tequila.utils.bitstrings import BitNumbering, BitString, initialize_bitstring
from tequila import TequilaException
from tequila.utils.keymap import KeyMapLSB2MSB, KeyMapMSB2LSB
from tequila.tools import number_to_string
# from __future__ import annotations # can use that in python 3.7+ to get rid of string type hints
if typing.TYPE_CHECKING:
# don't need those structures, just for convenient type hinting
from tequila.hamiltonian.qubit_hamiltonian import QubitHamiltonian, PauliString
class QubitWaveFunction:
"""
    Store a wavefunction as a dictionary mapping computational basis states to complex amplitudes.
    The same structure is used for measurement results, with integer counts instead of complex numbers.
"""
numbering = BitNumbering.MSB
def apply_keymap(self, keymap, initial_state: BitString = None):
self.n_qubits = keymap.n_qubits
mapped_state = dict()
for k, v in self.state.items():
mapped_key=keymap(input_state=k, initial_state=initial_state)
if mapped_key in mapped_state:
mapped_state[mapped_key] += v
else:
mapped_state[mapped_key] = v
self.state = mapped_state
return self
@property
def n_qubits(self) -> int:
if self._n_qubits is None:
return self.min_qubits()
else:
return max(self._n_qubits, self.min_qubits())
def min_qubits(self) -> int:
if len(self.state) > 0:
maxk = max(self.state.keys())
return maxk.nbits
else:
return 0
@n_qubits.setter
def n_qubits(self, n_qubits):
if n_qubits is not None:
self._n_qubits = max(n_qubits, self.min_qubits())
return self
@property
def state(self):
if self._state is None:
return dict()
else:
return self._state
@state.setter
def state(self, other: typing.Dict[BitString, complex]):
assert (isinstance(other, dict))
self._state = other
def __init__(self, state: typing.Dict[BitString, complex] = None, n_qubits=None):
if state is None:
self._state = dict()
elif isinstance(state, int):
self._state = self.from_int(i=state, n_qubits=n_qubits).state
elif isinstance(state, str):
self._state = self.from_string(string=state, n_qubits=n_qubits).state
elif isinstance(state, numpy.ndarray) or isinstance(state, list):
self._state = self.from_array(arr=state, n_qubits=n_qubits).state
elif hasattr(state, "state"):
self._state = state.state
else:
self._state = state
self._n_qubits = n_qubits
def items(self):
return self.state.items()
def keys(self):
return self.state.keys()
def values(self):
return self.state.values()
@staticmethod
def convert_bitstring(key: typing.Union[BitString, numbers.Integral], n_qubits):
if isinstance(key, numbers.Integral):
return BitString.from_int(integer=key, nbits=n_qubits)
elif isinstance(key, str):
return BitString.from_binary(binary=key, nbits=n_qubits)
else:
return key
def __getitem__(self, item: BitString):
key = self.convert_bitstring(item, self.n_qubits)
return self.state[key]
def __call__(self, key, *args, **kwargs) -> numbers.Number:
"""
Like getitem but returns zero if key is not there
Parameters
----------
key: bitstring (or int or str)
Returns
-------
        Return the amplitude or measurement occurrence of a bitstring
"""
ckey = self.convert_bitstring(key, self.n_qubits)
if ckey in self.state:
return self.state[ckey]
else:
return 0.0
def __setitem__(self, key: BitString, value: numbers.Number):
self._state[self.convert_bitstring(key, self.n_qubits)] = value
return self
def __contains__(self, item: BitString):
return self.convert_bitstring(item, self.n_qubits) in self.keys()
def __len__(self):
return len(self.state)
@classmethod
def from_array(cls, arr: numpy.ndarray, keymap=None, threshold: float = 1.e-6,
numbering: BitNumbering = BitNumbering.MSB, n_qubits: int = None):
arr = numpy.asarray(arr)
assert (len(arr.shape) == 1)
state = dict()
maxkey = len(arr) - 1
maxbit = initialize_bitstring(integer=maxkey, numbering_in=numbering, numbering_out=cls.numbering).nbits
for ii, v in enumerate(arr):
i = initialize_bitstring(integer=ii, nbits=maxbit, numbering_in=numbering, numbering_out=cls.numbering)
if not numpy.isclose(abs(v), 0.0, atol=threshold):
key = i if keymap is None else keymap(i)
state[key] = v
result = QubitWaveFunction(state, n_qubits=n_qubits)
if cls.numbering != numbering:
if cls.numbering == BitNumbering.MSB:
result.apply_keymap(keymap=KeyMapLSB2MSB())
else:
result.apply_keymap(keymap=KeyMapMSB2LSB())
return result
@classmethod
def from_int(cls, i: int, coeff=1, n_qubits: int = None):
if isinstance(i, BitString):
return QubitWaveFunction(state={i: coeff}, n_qubits=n_qubits)
else:
return QubitWaveFunction(state={BitString.from_int(integer=i, nbits=n_qubits): coeff}, n_qubits=n_qubits)
@classmethod
def from_string(cls, string: str, n_qubits: int = None):
"""
        Complex values like (x+iy)|...> will currently not work; you need to type the real and imaginary parts separately
        (or improve this constructor :-)
        e.g. instead of (0.5+1.0j)|0101> do 0.5|0101> + 1.0j|0101>
        :param string: string representation of the wavefunction
        :return: QubitWaveFunction
"""
try:
state = dict()
string = string.replace(" ", "")
string = string.replace("*", "")
string = string.replace("+-", "-")
string = string.replace("-+", "-")
terms = (string + "terminate").split('>')
for term in terms:
if term == 'terminate':
break
tmp = term.split("|")
coeff = tmp[0]
if coeff == '':
coeff = 1.0
else:
coeff = complex(coeff)
basis_state = BitString.from_binary(binary=tmp[1])
state[basis_state] = coeff
except ValueError:
raise TequilaException("Failed to initialize QubitWaveFunction from string:" + string + "\n"
"did you try complex values?\n"
"currently you need to type real and imaginary parts separately\n"
"e.g. instead of (0.5+1.0j)|0101> do 0.5|0101> + 1.0j|0101>")
except:
raise TequilaException("Failed to initialize QubitWaveFunction from string:" + string)
return QubitWaveFunction(state=state, n_qubits=n_qubits)
def __repr__(self):
result = str()
for k, v in self.items():
result += number_to_string(number=v) + "|" + str(k.binary) + "> "
return result
def __eq__(self, other):
raise TequilaException("Wavefunction equality is not well-defined. Consider using inner"
+ " product equality, wf1.isclose(wf2).")
def isclose(self : 'QubitWaveFunction',
other : 'QubitWaveFunction',
rtol : float=1e-5,
atol : float=1e-8) -> bool:
"""Return whether this wavefunction is similar to the target wavefunction."""
over1 = complex(self.inner(other))
over2 = numpy.sqrt(complex(self.inner(self) * other.inner(other)))
        # Explicit casts to complex() are required if self or other is a sympy
        # wavefunction with sympy-typed amplitudes.
# Check if the two numbers are equal.
return numpy.isclose(over1, over2, rtol=rtol, atol=atol)
def __add__(self, other):
result = QubitWaveFunction(state=copy.deepcopy(self._state))
for k, v in other.items():
if k in result._state:
result._state[k] += v
else:
result._state[k] = v
return result
def __sub__(self, other):
return self + -1.0 * other
def __iadd__(self, other):
for k, v in other.items():
if k in self._state:
self._state[k] += v
else:
self._state[k] = v
return self
def __rmul__(self, other):
result = QubitWaveFunction(state=copy.deepcopy(self._state))
for k, v in result._state.items():
result._state[k] *= other
return result
def inner(self, other):
# currently very slow and not optimized in any way
result = 0.0
for k, v in self.items():
if k in other._state:
result += v.conjugate() * other._state[k]
return result
def normalize(self):
"""
        NOT an in-place operation
        :return: the normalized wavefunction/count rate
"""
norm2 = self.inner(other=self)
normalized = 1.0 / numpy.sqrt(norm2) * self
return normalized
def compute_expectationvalue(self, operator: 'QubitHamiltonian') -> numbers.Real:
tmp = self.apply_qubitoperator(operator=operator)
E = self.inner(other=tmp)
if hasattr(E, "imag") and numpy.isclose(E.imag, 0.0, atol=1.e-6):
return float(E.real)
else:
return E
def apply_qubitoperator(self, operator: 'QubitHamiltonian'):
"""
Inefficient function which computes the action of a QubitHamiltonian on this wfn
:param operator: QubitOperator
:return: resulting Qubitwavefunction
"""
result = QubitWaveFunction()
for ps in operator.paulistrings:
result += self.apply_paulistring(paulistring=ps)
return result
def apply_paulistring(self, paulistring: 'PauliString'):
"""
Inefficient function which computes action of a single paulistring
:param paulistring: PauliString
        :return: resulting QubitWaveFunction
"""
result = QubitWaveFunction()
for k, v in self.items():
arr = k.array
c = v
for idx, p in paulistring.items():
if p.lower() == "x":
arr[idx] = (arr[idx] + 1) % 2
elif p.lower() == "y":
c *= 1.0j * (-1) ** (arr[idx])
arr[idx] = (arr[idx] + 1) % 2
elif p.lower() == "z":
c *= (-1) ** (arr[idx])
else:
raise TequilaException("unknown pauli: " + str(p))
result[BitString.from_array(array=arr)] = c
return paulistring.coeff * result
def to_array(self):
        result = numpy.zeros(shape=2 ** self.n_qubits, dtype=complex)  # numpy.complex was removed in NumPy 1.24+
for k, v in self.items():
result[int(k)] = v
return result
def simplify(self, threshold = 1.e-8):
state = {}
for k, v in self.state.items():
if not numpy.isclose(v, 0.0, atol=threshold):
state[k] = v
return QubitWaveFunction(state=state)
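# Usage sketch (amplitudes illustrative):
#   wfn = QubitWaveFunction.from_string("0.7071|00> + 0.7071|11>")
#   wfn = wfn.normalize()
#   wfn.inner(wfn)                     # ~1.0 after normalization
#   wfn(BitString.from_binary("00"))   # amplitude of |00>, 0.0 if absent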
| python | 11,807 |
import os
import pytest
import glob
from tests.lib.path import Path
from tests.lib import TestFailure
def test_install_from_future_wheel_version(script, data):
"""
Test installing a future wheel
"""
package = data.packages.join("futurewheel-3.0-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=True)
with pytest.raises(TestFailure):
result.assert_installed('futurewheel', without_egg_link=True,
editable=False)
package = data.packages.join("futurewheel-1.9-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=False)
result.assert_installed('futurewheel', without_egg_link=True,
editable=False)
def test_install_from_broken_wheel(script, data):
"""
Test that installing a broken wheel fails properly
"""
package = data.packages.join("brokenwheel-1.0-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=True)
with pytest.raises(TestFailure):
result.assert_installed('futurewheel', without_egg_link=True,
editable=False)
def test_install_from_wheel(script, data):
"""
Test installing from a wheel (that has a script)
"""
result = script.pip(
'install', 'has.script==1.0', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
dist_info_folder = script.site_packages / 'has.script-1.0.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
script_file = script.bin / 'script.py'
assert script_file in result.files_created
def test_install_from_wheel_with_extras(script, data):
"""
Test installing from a wheel with extras.
"""
result = script.pip(
'install', 'complex-dist[simple]', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
dist_info_folder = script.site_packages / 'complex_dist-0.1.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
def test_install_from_wheel_file(script, data):
"""
Test installing directly from a wheel file.
"""
package = data.packages.join("simple.dist-0.1-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=False)
dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
# header installs are broke in pypy virtualenvs
# https://github.com/pypa/virtualenv/issues/510
@pytest.mark.skipif("hasattr(sys, 'pypy_version_info')")
def test_install_from_wheel_with_headers(script, data):
"""
Test installing from a wheel file with headers
"""
package = data.packages.join("headers.dist-0.1-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=False)
dist_info_folder = script.site_packages / 'headers.dist-0.1.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
def test_install_wheel_with_target(script, data):
"""
Test installing a wheel using pip install --target
"""
script.pip('install', 'wheel')
target_dir = script.scratch_path / 'target'
result = script.pip(
'install', 'simple.dist==0.1', '-t', target_dir, '--use-wheel',
'--no-index', '--find-links=' + data.find_links,
)
assert Path('scratch') / 'target' / 'simpledist' in result.files_created, (
str(result)
)
def test_install_wheel_with_root(script, data):
"""
Test installing a wheel using pip install --root
"""
root_dir = script.scratch_path / 'root'
result = script.pip(
'install', 'simple.dist==0.1', '--root', root_dir, '--use-wheel',
'--no-index', '--find-links=' + data.find_links,
)
assert Path('scratch') / 'root' in result.files_created
def test_install_from_wheel_installs_deps(script, data):
"""
Test can install dependencies of wheels
"""
# 'requires_source' depends on the 'source' project
package = data.packages.join("requires_source-1.0-py2.py3-none-any.whl")
result = script.pip(
'install', '--no-index', '--find-links', data.find_links, package,
)
result.assert_installed('source', editable=False)
def test_install_from_wheel_no_deps(script, data):
"""
Test --no-deps works with wheel installs
"""
# 'requires_source' depends on the 'source' project
package = data.packages.join("requires_source-1.0-py2.py3-none-any.whl")
result = script.pip(
'install', '--no-index', '--find-links', data.find_links, '--no-deps',
package,
)
pkg_folder = script.site_packages / 'source'
assert pkg_folder not in result.files_created
# --user option is broken in pypy
@pytest.mark.skipif("hasattr(sys, 'pypy_version_info')")
def test_install_user_wheel(script, virtualenv, data):
"""
Test user install from wheel (that has a script)
"""
virtualenv.system_site_packages = True
script.pip('install', 'wheel')
result = script.pip(
'install', 'has.script==1.0', '--user', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
)
egg_info_folder = script.user_site / 'has.script-1.0.dist-info'
assert egg_info_folder in result.files_created, str(result)
script_file = script.user_bin / 'script.py'
assert script_file in result.files_created
def test_install_from_wheel_gen_entrypoint(script, data):
"""
Test installing scripts (entry points are generated)
"""
result = script.pip(
'install', 'script.wheel1a==0.1', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
if os.name == 'nt':
wrapper_file = script.bin / 't1.exe'
else:
wrapper_file = script.bin / 't1'
assert wrapper_file in result.files_created
if os.name != "nt":
assert bool(os.access(script.base_path / wrapper_file, os.X_OK))
def test_install_from_wheel_with_legacy(script, data):
"""
Test installing scripts (legacy scripts are preserved)
"""
result = script.pip(
'install', 'script.wheel2a==0.1', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
legacy_file1 = script.bin / 'testscript1.bat'
legacy_file2 = script.bin / 'testscript2'
assert legacy_file1 in result.files_created
assert legacy_file2 in result.files_created
def test_install_from_wheel_no_setuptools_entrypoint(script, data):
"""
Test that when we generate scripts, any existing setuptools wrappers in
the wheel are skipped.
"""
result = script.pip(
'install', 'script.wheel1==0.1', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
if os.name == 'nt':
wrapper_file = script.bin / 't1.exe'
else:
wrapper_file = script.bin / 't1'
wrapper_helper = script.bin / 't1-script.py'
# The wheel has t1.exe and t1-script.py. We will be generating t1 or
# t1.exe depending on the platform. So we check that the correct wrapper
# is present and that the -script.py helper has been skipped. We can't
# easily test that the wrapper from the wheel has been skipped /
# overwritten without getting very platform-dependent, so omit that.
assert wrapper_file in result.files_created
assert wrapper_helper not in result.files_created
def test_skipping_setuptools_doesnt_skip_legacy(script, data):
"""
Test installing scripts (legacy scripts are preserved even when we skip
setuptools wrappers)
"""
result = script.pip(
'install', 'script.wheel2==0.1', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
legacy_file1 = script.bin / 'testscript1.bat'
legacy_file2 = script.bin / 'testscript2'
wrapper_helper = script.bin / 't1-script.py'
assert legacy_file1 in result.files_created
assert legacy_file2 in result.files_created
assert wrapper_helper not in result.files_created
def test_install_from_wheel_gui_entrypoint(script, data):
"""
Test installing scripts (gui entry points are generated)
"""
result = script.pip(
'install', 'script.wheel3==0.1', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
if os.name == 'nt':
wrapper_file = script.bin / 't1.exe'
else:
wrapper_file = script.bin / 't1'
assert wrapper_file in result.files_created
def test_wheel_compiles_pyc(script, data):
"""
Test installing from wheel with --compile on
"""
script.pip(
"install", "--compile", "simple.dist==0.1", "--no-index",
"--find-links=" + data.find_links
)
# There are many locations for the __init__.pyc file so attempt to find
# any of them
exists = [
os.path.exists(script.site_packages_path / "simpledist/__init__.pyc"),
]
exists += glob.glob(
script.site_packages_path / "simpledist/__pycache__/__init__*.pyc"
)
assert any(exists)
def test_wheel_no_compiles_pyc(script, data):
"""
    Test installing from wheel with --no-compile on
"""
script.pip(
"install", "--no-compile", "simple.dist==0.1", "--no-index",
"--find-links=" + data.find_links
)
# There are many locations for the __init__.pyc file so attempt to find
# any of them
exists = [
os.path.exists(script.site_packages_path / "simpledist/__init__.pyc"),
]
exists += glob.glob(
script.site_packages_path / "simpledist/__pycache__/__init__*.pyc"
)
assert not any(exists)
| python | 10,784 |
"""
Trust Region Policy Optimization (TRPO)
---------------------------------------
A policy-gradient step that is too large can collapse the policy's performance,
and even a small step in parameter space can cause a large change in the policy.
TRPO constrains each step in policy space using the KL divergence (rather than in parameter space),
which monotonically improves performance and avoids collapsed updates.
Reference
---------
Trust Region Policy Optimization, Schulman et al. 2015
High Dimensional Continuous Control Using Generalized Advantage Estimation, Schulman et al. 2016
Approximately Optimal Approximate Reinforcement Learning, Kakade and Langford 2002
openai/spinningup : http://spinningup.openai.com/en/latest/algorithms/trpo.html
Environment
-----------
Openai Gym Pendulum-v0, continual action space
Prerequisites
--------------
tensorflow >=2.0.0a0
tensorflow-probability 0.6.0
tensorlayer >=2.0.0
To run
------
python tutorial_TRPO.py --train/test
"""
import argparse
import copy
import os
import time
import gym
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
import tensorflow as tf
import tensorflow_probability as tfp
from gym.spaces import Box, Discrete
import tensorlayer as tl
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=True)
parser.add_argument('--test', dest='train', action='store_false')
parser.add_argument('--env', type=str, default='Pendulum-v0') # environment name
parser.add_argument('--hid', type=int, default=64) # size of each hidden layer
parser.add_argument('--l', type=int, default=2) # hidden layer length
parser.add_argument('--gamma', type=float, default=0.99) # reward discount
parser.add_argument('--seed', '-s', type=int, default=1) # random seed
parser.add_argument('--steps', type=int, default=4000) # total number of steps for each episode
parser.add_argument('--epochs', type=int, default=500) # total number of episodes for training
args = parser.parse_args()
##################### hyper parameters ####################
ENV_NAME = args.env # environment name
HIDDEN_SIZES = [args.hid] * args.l # hidden layer size
SEED = args.seed # random seed
STEPS_PER_EPOCH = args.steps # total number of steps for each episode
EPOCHS = args.epochs # total number of episodes for training
GAMMA = args.gamma # reward discount
DELTA = 0.01 # KL-divergence limit for TRPO update.
VF_LR = 1e-3 # Learning rate for value function optimizer
TRAIN_V_ITERS = 80 # Number of gradient descent steps to take on value function per epoch
DAMPING_COEFF = 0.1 # Artifact for numerical stability
CG_ITERS = 10 # Number of iterations of conjugate gradient to perform
BACKTRACK_ITERS = 10 # Maximum number of steps allowed in the backtracking line search
BACKTRACK_COEFF = 0.8 # How far back to step during backtracking line search
LAM = 0.97 # Lambda for GAE-Lambda
MAX_EP_LEN = 1000 # Maximum length of trajectory
SAVE_FREQ = 10 # How often (in terms of gap between epochs) to save the current policy and value function
EPS = 1e-8 # epsilon
##################### functions ####################
def combined_shape(length, shape=None):
"""
combine length and shape based on shape type
:param length: int length
:param shape: shape, can be either scalar or array
:return: shape
"""
if shape is None:
return length,
return (length, shape) if np.isscalar(shape) else (length, *shape)
def keys_as_sorted_list(dict):
"""
sorted keys of the dict
:param dict: dict input
:return: sorted key list
"""
return sorted(list(dict.keys()))
def values_as_sorted_list(dict):
"""
sorted values of the dict
:param dict: dict input
:return: sorted value list
"""
return [dict[k] for k in keys_as_sorted_list(dict)]
def input_layer(dim=None):
"""
create tensorlayer input layer from dimension input
:param dim: dimension int
:return: tensorlayer input layer
"""
return tl.layers.Input(dtype=tf.float32, shape=combined_shape(None, dim))
def input_layers(*args):
"""
create tensorlayer input layers from a list of dimensions
:param args: a list of dimensions
:return: list of input layers
"""
return [input_layer(dim) for dim in args]
def input_layer_from_space(space):
"""
create tensorlayer input layers from env.space input
:param space: env.space
:return: tensorlayer input layer
"""
if isinstance(space, Box):
return input_layer(space.shape)
elif isinstance(space, Discrete):
return tl.layers.Input(dtype=tf.int32, shape=(None, ))
raise NotImplementedError
def input_layers_from_spaces(*args):
"""
create tensorlayer input layers from a list of env.space inputs
:param args: a list of env.space inputs
:return: tensorlayer input layer list
"""
return [input_layer_from_space(space) for space in args]
def mlp(x, hidden_sizes=(32, ), activation=tf.tanh, output_activation=None):
"""
create Multi-Layer Perception
:param x: tensorlayer input layer
:param hidden_sizes: hidden layer size
:param activation: hidden layer activation function
:param output_activation: activation function for the output layer
:return: output layer
"""
for h in hidden_sizes[:-1]:
x = tl.layers.Dense(n_units=h, act=activation)(x)
return tl.layers.Dense(n_units=hidden_sizes[-1], act=output_activation)(x)
def get_vars(model: tl.models.Model):
"""
get trainable parameters of the model
:param model: tensorlayer model
:return: a list of trainable parameters of the model
"""
return model.trainable_weights
def count_vars(model: tl.models.Model):
"""
count trainable parameters of the model
:param model: tensorlayer model
:return: counts
"""
v = get_vars(model)
return sum([np.prod(var.shape.as_list()) for var in v])
def gaussian_likelihood(x, mu, log_std):
"""
calculate gaussian likelihood
:param x: input distribution
:param mu: mu
:param log_std: log std
:return: gaussian likelihood
"""
pre_sum = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS))**2 + 2 * log_std + np.log(2 * np.pi))
return tf.reduce_sum(pre_sum, axis=1)
def diagonal_gaussian_kl(mu0, log_std0, mu1, log_std1):
"""
tf symbol for mean KL divergence between two batches of diagonal gaussian distributions,
where distributions are specified by means and log stds.
(https://en.wikipedia.org/wiki/Kullback-Leibler_divergence#Multivariate_normal_distributions)
"""
var0, var1 = tf.exp(2 * log_std0), tf.exp(2 * log_std1)
pre_sum = 0.5 * (((mu1 - mu0)**2 + var0) / (var1 + EPS) - 1) + log_std1 - log_std0
all_kls = tf.reduce_sum(pre_sum, axis=1)
return tf.reduce_mean(all_kls)
def categorical_kl(logp0, logp1):
"""
tf symbol for mean KL divergence between two batches of categorical probability distributions,
where the distributions are input as log probs.
"""
all_kls = tf.reduce_sum(tf.exp(logp1) * (logp1 - logp0), axis=1)
return tf.reduce_mean(all_kls)
def flat_concat(xs):
"""
flat concat input
:param xs: a list of tensor
:return: flat tensor
"""
return tf.concat([tf.reshape(x, (-1, )) for x in xs], axis=0)
def assign_params_from_flat(x, params):
"""
assign params from flat input
:param x:
:param params:
:return: group
"""
flat_size = lambda p: int(np.prod(p.shape.as_list())) # the 'int' is important for scalars
splits = tf.split(x, [flat_size(p) for p in params])
new_params = [tf.reshape(p_new, p.shape) for p, p_new in zip(params, splits)]
return tf.group([p.assign(p_new) for p, p_new in zip(params, new_params)])
def discount_cumsum(x, discount):
"""
magic from rllab for computing discounted cumulative sums of vectors.
input:
vector x,
[x0,
x1,
x2]
output:
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2]
"""
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
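# Worked example (illustrative): discount_cumsum([1., 1., 1.], 0.5)
# -> [1 + 0.5 + 0.25, 1 + 0.5, 1] = [1.75, 1.5, 1.0]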
"""
Policies
"""
class MlpCategoricalPolicy:
"""
Categorical Policy for discrete input
"""
def __init__(self, x, a, hidden_sizes, activation, output_activation):
self.act_dim = a.n
x = input_layer_from_space(x)
logits = mlp(x, list(hidden_sizes) + [self.act_dim], activation, None)
self.model = tl.models.Model(x, logits)
self.model.train()
def cal_outputs_0(self, states):
states = states.astype(np.float32)
logits = self.model(states)
logp_all = tf.nn.log_softmax(logits)
        pi = tf.squeeze(tf.random.categorical(logits, 1), axis=1)  # sample one action index per state
logp_pi = tf.reduce_sum(tf.one_hot(pi, depth=self.act_dim) * logp_all, axis=1)
info = {'logp_all': logp_all}
return pi, logp_pi, info, logp_all
def cal_outputs_1(self, states, actions, old_logp_all):
pi, logp_pi, info, logp_all = self.cal_outputs_0(states)
logp = tf.reduce_sum(tf.one_hot(actions, depth=self.act_dim) * logp_all, axis=1)
d_kl = categorical_kl(logp_all, old_logp_all)
info_phs = {'logp_all': old_logp_all}
return pi, logp, logp_pi, info, info_phs, d_kl
class MlpGaussianPolicy:
"""
Gaussian Policy for continuous input
"""
def __init__(self, x, a, hidden_sizes, activation, output_activation):
act_dim = a.shape[0]
x = input_layer_from_space(x)
mu = mlp(x, list(hidden_sizes) + [act_dim], activation, output_activation)
self.model = tl.models.Model(x, mu)
self.model.train()
self._log_std = tf.Variable(-0.5 * np.ones(act_dim, dtype=np.float32))
self.model.trainable_weights.append(self._log_std)
def cal_outputs_0(self, states):
states = states.astype(np.float32)
mu = self.model(states)
std = tf.exp(self._log_std)
pi = mu + tf.random.normal(tf.shape(mu)) * std
logp_pi = gaussian_likelihood(pi, mu, self._log_std)
info = {'mu': mu, 'log_std': self._log_std}
return pi, logp_pi, info, mu, self._log_std
def cal_outputs_1(self, states, actions, old_log_std_ph, old_mu_ph):
pi, logp_pi, info, mu, log_std = self.cal_outputs_0(states)
logp = gaussian_likelihood(actions, mu, log_std)
d_kl = diagonal_gaussian_kl(mu, log_std, old_mu_ph, old_log_std_ph)
info_phs = {'mu': old_mu_ph, 'log_std': old_log_std_ph}
return pi, logp, logp_pi, info, info_phs, d_kl
"""
Actor-Critics
"""
def mlp_actor_critic(
x: 'env.observation_space', a: 'env.action_space', hidden_sizes=(64, 64), activation=tf.tanh,
output_activation=None
):
"""
create actor and critic
:param x: observation space
:param a: action space
:param hidden_sizes: hidden layer size
:param activation: hidden layer activation function
:param output_activation: activation function for the output layer
    :return: actor class and critic class
"""
# default policy builder depends on action space
if isinstance(a, Box):
actor = MlpGaussianPolicy(x, a, hidden_sizes, activation, output_activation)
elif isinstance(a, Discrete):
actor = MlpCategoricalPolicy(x, a, hidden_sizes, activation, output_activation)
else:
raise ValueError('action space type error')
class Critic:
def __init__(self, obs_space, hidden_layer_sizes, activation_funcs):
inputs = input_layer_from_space(obs_space)
self.model = tl.models.Model(inputs, mlp(inputs, list(hidden_layer_sizes) + [1], activation_funcs, None))
self.model.train()
def critic_cal_func(self, states):
states = states.astype(np.float32)
return tf.squeeze(self.model(states), axis=1)
critic = Critic(x, hidden_sizes, activation)
return actor, critic
class GAEBuffer:
"""
A buffer for storing trajectories experienced by a TRPO agent interacting
with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
for calculating the advantages of state-action pairs.
"""
def __init__(self, obs_dim, act_dim, size, info_shapes, gamma=0.99, lam=0.95):
self.obs_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(combined_shape(size, act_dim), dtype=np.float32)
self.adv_buf = np.zeros(size, dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.ret_buf = np.zeros(size, dtype=np.float32)
self.val_buf = np.zeros(size, dtype=np.float32)
self.logp_buf = np.zeros(size, dtype=np.float32)
self.info_bufs = {k: np.zeros([size] + list(v), dtype=np.float32) for k, v in info_shapes.items()}
self.sorted_info_keys = keys_as_sorted_list(self.info_bufs)
self.gamma, self.lam = gamma, lam
self.ptr, self.path_start_idx, self.max_size = 0, 0, size
def store(self, obs, act, rew, val, logp, info):
"""
Append one timestep of agent-environment interaction to the buffer.
"""
assert self.ptr < self.max_size # buffer has to have room so you can store
self.obs_buf[self.ptr] = obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.val_buf[self.ptr] = val
self.logp_buf[self.ptr] = logp
for i, k in enumerate(self.sorted_info_keys):
self.info_bufs[k][self.ptr] = info[i]
self.ptr += 1
def finish_path(self, last_val=0):
"""
Call this at the end of a trajectory, or when one gets cut off
by an epoch ending. This looks back in the buffer to where the
trajectory started, and uses rewards and value estimates from
the whole trajectory to compute advantage estimates with GAE-Lambda,
as well as compute the rewards-to-go for each state, to use as
the targets for the value function.
The "last_val" argument should be 0 if the trajectory ended
because the agent reached a terminal state (died), and otherwise
should be V(s_T), the value function estimated for the last state.
This allows us to bootstrap the reward-to-go calculation to account
for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
"""
path_slice = slice(self.path_start_idx, self.ptr)
rews = np.append(self.rew_buf[path_slice], last_val)
vals = np.append(self.val_buf[path_slice], last_val)
# the next two lines implement GAE-Lambda advantage calculation
deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
self.adv_buf[path_slice] = discount_cumsum(deltas, self.gamma * self.lam)
# the next line computes rewards-to-go, to be targets for the value function
self.ret_buf[path_slice] = discount_cumsum(rews, self.gamma)[:-1]
self.path_start_idx = self.ptr
def get(self):
"""
Call this at the end of an epoch to get all of the data from
the buffer, with advantages appropriately normalized (shifted to have
mean zero and std one). Also, resets some pointers in the buffer.
"""
assert self.ptr == self.max_size # buffer has to be full before you can get
self.ptr, self.path_start_idx = 0, 0
# the next two lines implement the advantage normalization trick
adv_mean, adv_std = np.mean(self.adv_buf), np.std(self.adv_buf)
self.adv_buf = (self.adv_buf - adv_mean) / adv_std
return [self.obs_buf, self.act_buf, self.adv_buf, self.ret_buf, self.logp_buf
] + values_as_sorted_list(self.info_bufs)
##################### TRPO ####################
"""
Trust Region Policy Optimization
(with support for Natural Policy Gradient)
"""
class TRPO:
"""
trpo class
"""
def __init__(self, obs_space, act_space):
obs_dim = obs_space.shape
act_dim = act_space.shape
# # Main models and functions
self.actor, self.critic = mlp_actor_critic(obs_space, act_space, HIDDEN_SIZES)
        if isinstance(act_space, Box):
            act_dim = act_space.shape[0]
            info_shapes = {'mu': [act_dim], 'log_std': [act_dim]}
        elif isinstance(act_space, Discrete):
            act_dim = act_space.n
            info_shapes = {'logp_all': [act_dim]}
else:
raise Exception('info_shape error')
self.buf = GAEBuffer(obs_dim, act_dim, STEPS_PER_EPOCH, info_shapes, GAMMA, LAM)
# Optimizer for value function
self.critic_optimizer = tf.optimizers.Adam(learning_rate=VF_LR)
# Every step, get: action, value, logprob, & info for pdist (for computing kl div)
def get_action_ops(self, states):
"""
get action
:param states: state input
:return: pi, v, logp_pi and other outputs
"""
pi, logp_pi, info, *_ = self.actor.cal_outputs_0(states)
v = self.critic.critic_cal_func(states)
res0 = [pi, v, logp_pi] + values_as_sorted_list(info)
res = []
for i in res0:
res.append(i + 0) # transfer to tensor
return res
# TRPO losses
def pi_loss(self, inputs):
"""
calculate pi loss
:param inputs: a list of x_ph, a_ph, adv_ph, ret_ph, logp_old_ph and other inputs
:return: pi loss
"""
x_ph, a_ph, adv_ph, ret_ph, logp_old_ph, *info_values = inputs
pi, logp, logp_pi, info, info_phs, d_kl = self.actor.cal_outputs_1(x_ph, a_ph, *info_values)
ratio = tf.exp(logp - logp_old_ph) # pi(a|s) / pi_old(a|s)
pi_loss = -tf.reduce_mean(ratio * adv_ph)
return pi_loss
def v_loss(self, inputs):
"""
calculate value loss
:param inputs: a list of x_ph, a_ph, adv_ph, ret_ph, logp_old_ph and other inputs
:return: v loss
"""
x_ph, a_ph, adv_ph, ret_ph, logp_old_ph, *info_values = inputs
v = self.critic.critic_cal_func(x_ph)
v_loss = tf.reduce_mean((ret_ph - v)**2)
return v_loss
def train_vf(self, inputs):
"""
train v function
:param inputs: a list of x_ph, a_ph, adv_ph, ret_ph, logp_old_ph and other inputs
:return: None
"""
with tf.GradientTape() as tape:
loss = self.v_loss(inputs)
grad = tape.gradient(loss, self.critic.model.trainable_weights)
self.critic_optimizer.apply_gradients(zip(grad, self.critic.model.trainable_weights))
# Symbols needed for CG solver
def gradient(self, inputs):
"""
pi gradients
:param inputs: a list of x_ph, a_ph, adv_ph, ret_ph, logp_old_ph and other inputs
:return: gradient
"""
pi_params = self.actor.model.trainable_weights
with tf.GradientTape() as tape:
loss = self.pi_loss(inputs)
grad = tape.gradient(loss, pi_params)
gradient = flat_concat(grad)
return gradient
def hvp(self, inputs, v_ph):
"""
calculate hvp
:param inputs: a list of x_ph, a_ph, adv_ph, ret_ph, logp_old_ph and other inputs
:param v_ph: v input
:return: hvp
"""
pi_params = self.actor.model.trainable_weights
x_ph, a_ph, adv_ph, ret_ph, logp_old_ph, *info_values = inputs
with tf.GradientTape() as tape1:
with tf.GradientTape() as tape0:
pi, logp, logp_pi, info, info_phs, d_kl = self.actor.cal_outputs_1(x_ph, a_ph, *info_values)
g = flat_concat(tape0.gradient(d_kl, pi_params))
l = tf.reduce_sum(g * v_ph)
hvp = flat_concat(tape1.gradient(l, pi_params))
if DAMPING_COEFF > 0:
hvp += DAMPING_COEFF * v_ph
return hvp
# Symbols for getting and setting params
def get_pi_params(self):
"""
get actor trainable parameters
:return: flat actor trainable parameters
"""
pi_params = self.actor.model.trainable_weights
return flat_concat(pi_params)
def set_pi_params(self, v_ph):
"""
set actor trainable parameters
:param v_ph: inputs
:return: None
"""
pi_params = self.actor.model.trainable_weights
assign_params_from_flat(v_ph, pi_params)
def save_ckpt(self):
"""
save trained weights
:return: None
"""
if not os.path.exists('model'):
os.makedirs('model')
tl.files.save_weights_to_hdf5('model/trpo_actor.hdf5', self.actor.model)
tl.files.save_weights_to_hdf5('model/trpo_critic.hdf5', self.critic.model)
def load_ckpt(self):
"""
load trained weights
:return: None
"""
tl.files.load_hdf5_to_weights_in_order('model/trpo_actor.hdf5', self.actor.model)
tl.files.load_hdf5_to_weights_in_order('model/trpo_critic.hdf5', self.critic.model)
def cg(self, Ax, b):
"""
Conjugate gradient algorithm
(see https://en.wikipedia.org/wiki/Conjugate_gradient_method)
"""
x = np.zeros_like(b)
r = copy.deepcopy(b) # Note: should be 'b - Ax(x)', but for x=0, Ax(x)=0. Change if doing warm start.
p = copy.deepcopy(r)
r_dot_old = np.dot(r, r)
for _ in range(CG_ITERS):
z = Ax(p)
alpha = r_dot_old / (np.dot(p, z) + EPS)
x += alpha * p
r -= alpha * z
r_dot_new = np.dot(r, r)
p = r + (r_dot_new / r_dot_old) * p
r_dot_old = r_dot_new
return x
def update(self):
"""
update trpo
:return:
"""
# Prepare hessian func, gradient eval
inputs = self.buf.get()
Hx = lambda x: self.hvp(inputs, x)
g, pi_l_old, v_l_old = self.gradient(inputs), self.pi_loss(inputs), self.v_loss(inputs)
# Core calculations for TRPO or NPG
x = self.cg(Hx, g)
alpha = np.sqrt(2 * DELTA / (np.dot(x, Hx(x)) + EPS))
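        # alpha sizes the full step s = alpha * x to sit on the trust-region
        # boundary of the quadratic KL estimate: 0.5 * s^T H s = DELTA
        # => alpha = sqrt(2 * DELTA / x^T H x).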
old_params = self.get_pi_params()
def set_and_eval(step):
aa = alpha * x * step
par = old_params - aa
self.set_pi_params(par)
x_ph, a_ph, adv_ph, ret_ph, logp_old_ph, *info_values = inputs
pi, logp, logp_pi, info, info_phs, d_kl = self.actor.cal_outputs_1(x_ph, a_ph, *info_values)
loss = self.pi_loss(inputs)
return [d_kl, loss]
# trpo augments npg with backtracking line search, hard kl
for j in range(BACKTRACK_ITERS):
kl, pi_l_new = set_and_eval(step=BACKTRACK_COEFF**j)
if kl <= DELTA and pi_l_new <= pi_l_old:
# Accepting new params at step of line search
break
if j == BACKTRACK_ITERS - 1:
# Line search failed! Keeping old params.
kl, pi_l_new = set_and_eval(step=0.)
# Value function updates
for _ in range(TRAIN_V_ITERS):
self.train_vf(inputs)
if __name__ == '__main__':
tf.random.set_seed(SEED)
np.random.seed(SEED)
env = gym.make(ENV_NAME)
env.seed(SEED)
agent = TRPO(env.observation_space, env.action_space)
if args.train:
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
reward_list = []
# Main loop: collect experience in env and update/log each epoch
for epoch in range(EPOCHS):
t0 = time.time()
rew = 0
for t in range(STEPS_PER_EPOCH):
agent_outs = agent.get_action_ops(o.reshape(1, -1))
a, v_t, logp_t, info_t = np.array(agent_outs[0][0], np.float32), \
np.array(agent_outs[1], np.float32), \
np.array(agent_outs[2], np.float32), \
np.array(agent_outs[3:], np.float32)
# save and log
agent.buf.store(o, a, r, v_t, logp_t, info_t)
o, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
terminal = d or (ep_len == MAX_EP_LEN)
if terminal or (t == STEPS_PER_EPOCH - 1):
if not (terminal):
print('Warning: trajectory cut off by epoch at %d steps.' % ep_len)
# if trajectory didn't reach terminal state, bootstrap value target
last_val = r if d else agent.critic.critic_cal_func(o.reshape(1, -1))
agent.buf.finish_path(last_val)
rew = ep_ret
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# Save model
if (epoch % SAVE_FREQ == 0) or (epoch == EPOCHS - 1):
agent.save_ckpt()
# Perform TRPO or NPG update!
agent.update()
print('epoch [{}/{}] ep_ret: {} time: {}'.format(epoch, EPOCHS, rew, time.time() - t0))
reward_list.append(rew)
plt.clf()
plt.ion()
plt.plot(reward_list)
plt.title('TRPO ' + str(DELTA))
plt.ylim(-2000, 0)
plt.show()
plt.pause(0.1)
agent.save_ckpt()
plt.ioff()
plt.show()
# test
agent.load_ckpt()
while True:
o = env.reset()
for i in range(STEPS_PER_EPOCH):
env.render()
agent_outs = agent.get_action_ops(o.reshape(1, -1))
a, v_t, logp_t, info_t = agent_outs[0][0], agent_outs[1], agent_outs[2], agent_outs[3:]
o, r, d, _ = env.step(a)
if d:
break
| python | 25,920 |
#
# doctest.py: Syntax Highlighting for doctest blocks
# Edward Loper
#
# Created [06/28/03 02:52 AM]
# $Id: restructuredtext.py 1210 2006-04-10 13:25:50Z edloper $
#
"""
Syntax highlighting for doctest blocks. This module defines two
functions, L{doctest_to_html()} and L{doctest_to_latex()}, which can
be used to perform syntax highlighting on doctest blocks. It also
defines the more general C{colorize_doctest()}, which could be used to
do syntax highlighting on doctest blocks with other output formats.
(Both C{doctest_to_html()} and C{doctest_to_latex()} are defined using
C{colorize_doctest()}.)
"""
from __future__ import absolute_import
__docformat__ = 'epytext en'
import re
try:
    import builtins  # Python 3
except ImportError:  # Python 2
    import __builtin__ as builtins
from epydoc.util import plaintext_to_html, plaintext_to_latex
__all__ = ['doctest_to_html', 'doctest_to_latex',
'DoctestColorizer', 'XMLDoctestColorizer',
'HTMLDoctestColorizer', 'LaTeXDoctestColorizer']
def doctest_to_html(s):
"""
Perform syntax highlighting on the given doctest string, and
return the resulting HTML code. This code consists of a C{<pre>}
block with class=py-doctest. Syntax highlighting is performed
using the following css classes:
- C{py-prompt} -- the Python PS1 prompt (>>>)
- C{py-more} -- the Python PS2 prompt (...)
- C{py-keyword} -- a Python keyword (for, if, etc.)
- C{py-builtin} -- a Python builtin name (abs, dir, etc.)
- C{py-string} -- a string literal
- C{py-comment} -- a comment
- C{py-except} -- an exception traceback (up to the next >>>)
- C{py-output} -- the output from a doctest block.
- C{py-defname} -- the name of a function or class defined by
a C{def} or C{class} statement.
"""
return HTMLDoctestColorizer().colorize_doctest(s)
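# Usage sketch (output shape assumed from the docstring above):
#   doctest_to_html('>>> print(1+1)\n2\n')
#   -> a '<pre class="py-doctest">...</pre>' block whose prompt and output
#      lines are wrapped in <span class="py-prompt"> / <span class="py-output">.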
def doctest_to_latex(s):
"""
Perform syntax highlighting on the given doctest string, and
return the resulting LaTeX code. This code consists of an
C{alltt} environment. Syntax highlighting is performed using
the following new latex commands, which must be defined externally:
- C{\pysrcprompt} -- the Python PS1 prompt (>>>)
- C{\pysrcmore} -- the Python PS2 prompt (...)
- C{\pysrckeyword} -- a Python keyword (for, if, etc.)
- C{\pysrcbuiltin} -- a Python builtin name (abs, dir, etc.)
- C{\pysrcstring} -- a string literal
- C{\pysrccomment} -- a comment
- C{\pysrcexcept} -- an exception traceback (up to the next >>>)
- C{\pysrcoutput} -- the output from a doctest block.
- C{\pysrcdefname} -- the name of a function or class defined by
a C{def} or C{class} statement.
"""
return LaTeXDoctestColorizer().colorize_doctest(s)
class DoctestColorizer:
"""
An abstract base class for performing syntax highlighting on
doctest blocks and other bits of Python code. Subclasses should
provide definitions for:
- The L{markup()} method, which takes a substring and a tag, and
returns a colorized version of the substring.
- The L{PREFIX} and L{SUFFIX} variables, which will be added
to the beginning and end of the strings returned by
L{colorize_codeblock} and L{colorize_doctest}.
"""
#: A string that is added to the beginning of the strings
#: returned by L{colorize_codeblock} and L{colorize_doctest}.
#: Typically, this string begins a preformatted area.
PREFIX = None
#: A string that is added to the end of the strings
#: returned by L{colorize_codeblock} and L{colorize_doctest}.
#: Typically, this string ends a preformatted area.
SUFFIX = None
#: The string used to divide lines
NEWLINE = '\n'
#: A list of the names of all Python keywords. ('as' is included
#: even though it is technically not a keyword.)
    _KEYWORDS = ("and del for is raise "
                 "assert elif from lambda return "
                 "break else global not try "
                 "class except if or while "
                 "continue exec import pass yield "
                 "def finally in print as").split()
#: A list of all Python builtins.
    _BUILTINS = [_BI for _BI in dir(builtins)
                 if not _BI.startswith('__')]
#: A regexp group that matches keywords.
_KEYWORD_GRP = '|'.join([r'\b%s\b' % _KW for _KW in _KEYWORDS])
#: A regexp group that matches Python builtins.
_BUILTIN_GRP = (r'(?<!\.)(?:%s)' % '|'.join([r'\b%s\b' % _BI
for _BI in _BUILTINS]))
#: A regexp group that matches Python strings.
_STRING_GRP = '|'.join(
[r'("""("""|.*?((?!").)"""))', r'("("|.*?((?!").)"))',
r"('''('''|.*?[^\\']'''))", r"('('|.*?[^\\']'))"])
#: A regexp group that matches Python comments.
_COMMENT_GRP = '(#.*?$)'
#: A regexp group that matches Python ">>>" prompts.
_PROMPT1_GRP = r'^[ \t]*>>>(?:[ \t]|$)'
#: A regexp group that matches Python "..." prompts.
_PROMPT2_GRP = r'^[ \t]*\.\.\.(?:[ \t]|$)'
#: A regexp group that matches function and class definitions.
_DEFINE_GRP = r'\b(?:def|class)[ \t]+\w+'
#: A regexp that matches Python prompts
PROMPT_RE = re.compile('(%s|%s)' % (_PROMPT1_GRP, _PROMPT2_GRP),
re.MULTILINE | re.DOTALL)
#: A regexp that matches Python "..." prompts.
PROMPT2_RE = re.compile('(%s)' % _PROMPT2_GRP,
re.MULTILINE | re.DOTALL)
#: A regexp that matches doctest exception blocks.
EXCEPT_RE = re.compile(r'^[ \t]*Traceback \(most recent call last\):.*',
re.DOTALL | re.MULTILINE)
#: A regexp that matches doctest directives.
DOCTEST_DIRECTIVE_RE = re.compile(r'#[ \t]*doctest:.*')
#: A regexp that matches all of the regions of a doctest block
#: that should be colored.
DOCTEST_RE = re.compile(
r'(.*?)((?P<STRING>%s)|(?P<COMMENT>%s)|(?P<DEFINE>%s)|'
r'(?P<KEYWORD>%s)|(?P<BUILTIN>%s)|'
r'(?P<PROMPT1>%s)|(?P<PROMPT2>%s)|(?P<EOS>\Z))' % (
_STRING_GRP, _COMMENT_GRP, _DEFINE_GRP, _KEYWORD_GRP, _BUILTIN_GRP,
_PROMPT1_GRP, _PROMPT2_GRP), re.MULTILINE | re.DOTALL)
#: This regular expression is used to find doctest examples in a
#: string. This is copied from the standard Python doctest.py
#: module (after the refactoring in Python 2.4+).
DOCTEST_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)* # PS2 lines
\n?)
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
def colorize_inline(self, s):
"""
Colorize a string containing Python code. Do not add the
L{PREFIX} and L{SUFFIX} strings to the returned value. This
method is intended for generating syntax-highlighted strings
that are appropriate for inclusion as inline expressions.
"""
return self.DOCTEST_RE.sub(self.subfunc, s)
def colorize_codeblock(self, s):
"""
Colorize a string containing only Python code. This method
differs from L{colorize_doctest} in that it will not search
for doctest prompts when deciding how to colorize the string.
"""
body = self.DOCTEST_RE.sub(self.subfunc, s)
return self.PREFIX + body + self.SUFFIX
def colorize_doctest(self, s, strip_directives=False):
"""
Colorize a string containing one or more doctest examples.
"""
output = []
charno = 0
for m in self.DOCTEST_EXAMPLE_RE.finditer(s):
# Parse the doctest example:
pysrc, want = m.group('source', 'want')
# Pre-example text:
output.append(self.NEWLINE.join(s[charno:m.start()].split('\n')))
# Example source code:
output.append(self.DOCTEST_RE.sub(self.subfunc, pysrc))
# Example output:
            if want:
                if self.EXCEPT_RE.match(want):
                    output.append(self.NEWLINE.join(
                        [self.markup(line, 'except')
                         for line in want.split('\n')]))
                else:
                    output.append(self.NEWLINE.join(
                        [self.markup(line, 'output')
                         for line in want.split('\n')]))
# Update charno
charno = m.end()
# Add any remaining post-example text.
output.append(self.NEWLINE.join(s[charno:].split('\n')))
return self.PREFIX + ''.join(output) + self.SUFFIX
def subfunc(self, match):
other, text = match.group(1, 2)
#print('M %20r %20r' % (other, text)) # <- for debugging
if other:
other = self.NEWLINE.join([self.markup(line, 'other')
for line in other.split('\n')])
if match.group('PROMPT1'):
return other + self.markup(text, 'prompt')
elif match.group('PROMPT2'):
return other + self.markup(text, 'more')
elif match.group('KEYWORD'):
return other + self.markup(text, 'keyword')
elif match.group('BUILTIN'):
return other + self.markup(text, 'builtin')
elif match.group('COMMENT'):
return other + self.markup(text, 'comment')
elif match.group('STRING') and '\n' not in text:
return other + self.markup(text, 'string')
elif match.group('STRING'):
# It's a multiline string; colorize the string & prompt
# portion of each line.
pieces = []
for line in text.split('\n'):
if self.PROMPT2_RE.match(line):
if len(line) > 4:
pieces.append(self.markup(line[:4], 'more') +
self.markup(line[4:], 'string'))
else:
pieces.append(self.markup(line[:4], 'more'))
elif line:
pieces.append(self.markup(line, 'string'))
else:
pieces.append('')
return other + self.NEWLINE.join(pieces)
elif match.group('DEFINE'):
            m = re.match(r'(?P<def>\w+)(?P<space>\s+)(?P<name>\w+)', text)
return other + (self.markup(m.group('def'), 'keyword') +
self.markup(m.group('space'), 'other') +
self.markup(m.group('name'), 'defname'))
elif match.group('EOS') is not None:
return other
else:
assert 0, 'Unexpected match!'
def markup(self, s, tag):
"""
Apply syntax highlighting to a single substring from a doctest
block. C{s} is the substring, and C{tag} is the tag that
should be applied to the substring. C{tag} will be one of the
following strings:
- C{prompt} -- the Python PS1 prompt (>>>)
- C{more} -- the Python PS2 prompt (...)
- C{keyword} -- a Python keyword (for, if, etc.)
- C{builtin} -- a Python builtin name (abs, dir, etc.)
- C{string} -- a string literal
- C{comment} -- a comment
- C{except} -- an exception traceback (up to the next >>>)
- C{output} -- the output from a doctest block.
- C{defname} -- the name of a function or class defined by
a C{def} or C{class} statement.
- C{other} -- anything else (does *not* include output.)
"""
raise AssertionError("Abstract method")
class XMLDoctestColorizer(DoctestColorizer):
"""
A subclass of DoctestColorizer that generates XML-like output.
This class is mainly intended to be used for testing purposes.
"""
PREFIX = '<colorized>\n'
SUFFIX = '</colorized>\n'
def markup(self, s, tag):
        s = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
if tag == 'other': return s
else: return '<%s>%s</%s>' % (tag, s, tag)
class HTMLDoctestColorizer(DoctestColorizer):
"""A subclass of DoctestColorizer that generates HTML output."""
PREFIX = '<pre class="py-doctest">\n'
SUFFIX = '</pre>\n'
def markup(self, s, tag):
if tag == 'other':
return plaintext_to_html(s)
else:
return ('<span class="py-%s">%s</span>' %
(tag, plaintext_to_html(s)))
class LaTeXDoctestColorizer(DoctestColorizer):
"""A subclass of DoctestColorizer that generates LaTeX output."""
PREFIX = ('\\begin{alltt}')
SUFFIX = '\\end{alltt}\n'
NEWLINE = '\\\\'
def markup(self, s, tag):
if tag == 'other':
return plaintext_to_latex(s)
else:
return '\\pysrc%s{%s}' % (tag, plaintext_to_latex(s))
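# A minimal usage sketch (an assumption, not part of the original module):
# XMLDoctestColorizer is self-contained, so it can demonstrate the
# colorize_doctest() flow described in the DoctestColorizer docstring.
if __name__ == '__main__':
    _sample = (">>> x = 1  # set x\n"
               ">>> print x\n"
               "1\n")
    # Prompts, keywords, comments, and output are wrapped in XML-like tags.
    print(XMLDoctestColorizer().colorize_doctest(_sample))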
| python | 13,257 |
# Main File - Driver for the Application PcapXray
# Import Libraries
import os #-- default lib - packed with python
import sys #-- default lib
import datetime #-- default lib
if sys.platform == 'darwin':
import matplotlib
matplotlib.use('TkAgg')
from cefpython3 import cefpython as cef
try:
# for Python2
from Tkinter import *
import ttk
except ImportError:
# for Python3
from tkinter import *
from tkinter import ttk
# Import Custom Modules - Self created by the author
if sys.path[0]:
sys.path.insert(0,sys.path[0]+'/Module/')
else:
sys.path.insert(0, 'Module/')
import user_interface
# Import 3rd party Libraries -- Needed to be installed using pip
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
def main():
base = Tk()
logo_file = os.path.join(os.path.dirname(__file__), 'Module/assets/logo.gif')
icon = PhotoImage(file=logo_file)
base.tk.call('wm','iconphoto',base._w,icon)
user_interface.pcapXrayGui(base)
cef.Initialize()
base.mainloop()
cef.Shutdown()
main()
| python | 1,077 |
import sys
import re
import json
import subprocess
# $node_conversion -i node0_3_CPU0
# sysmgr_show -o -p optics_ma -n 768
procs_list = {'coherent_driver': [],
'fia_driver': [],
'ifmgr' : [],
'optics_ma' : [],
'optics_ea' : ['coh_aipc_client'],
'otn_ma' : [],
'otn_ea' : ['coh_aipc_client', 'icpe_local_ea'],
'ttt' : ['ggg']
}
class process_state:
def __init__(self, proc_name_list, nodeid):
self.proc_name_list = proc_name_list
self.nodeid = nodeid
self.proc_state = {}
def get_process_jid (self, output_lines):
jid = -1
for line in output_lines:
jid_line = line.find('Job Id')
if jid_line != -1:
jid = line.split(':')
if len(jid) == 2:
jid = jid[1].lstrip().rstrip()
break
return jid
def find_threads_running(self, proc_name, output_lines, threads):
running = {}
if len(threads) == 0:
running[proc_name] = 1
return running
for thread in threads:
running[thread] = 0
for line in output_lines:
for thread in threads:
if line.find(thread) != -1:
running[thread] = 1
return running
def get_process_output (self, proc_name, nodeid):
output = run_command('/pkg/sbin/sysmgr_show -o -p ' + proc_name + ' -n ' + str(nodeid))
return output
def check_proc_state (self):
for process, threads in self.proc_name_list.items():
proc = {}
output = self.get_process_output(process, self.nodeid)
output_lines = output.split('\n')
jid = self.get_process_jid(output_lines)
proc['jid'] = jid
            running = self.find_threads_running(process, output_lines, threads)
proc['threads'] = running
for run, val in running.items():
if val == 0:
proc['good_state'] = 0
break
else:
proc['good_state'] = 1
self.proc_state[process] = proc
def dump_proc_state(self):
        print(json.dumps(self.proc_state, sort_keys=True, indent=4))
def run_command(command):
try:
output = subprocess.check_output(command, shell=True)
except subprocess.CalledProcessError:
output = ''
return output
def get_node_id_from_node_name(node_name):
node_id = 0
name = 'node' + re.sub('/', '_', node_name)
output = run_command('/pkg/bin/node_conversion' + ' -i ' + name)
try:
node_id = int(output)
except ValueError:
node_id = 0
return node_id
if len(sys.argv) < 2:
exit(-1)
node_id = get_node_id_from_node_name(sys.argv[1])
proc = process_state(procs_list, node_id)
proc.check_proc_state()
proc.dump_proc_state()
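# Illustrative only (the node name, file name, and values below are made up):
#     python check_procs.py 0/RP0/CPU0
# prints one JSON object covering all monitored processes, shaped like:
#     {
#         "optics_ma": {
#             "good_state": 1,
#             "jid": "1234",
#             "threads": {"optics_ma": 1}
#         },
#         ...
#     }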
| python | 2,987 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEbppInvoiceAuthUnsignModel(object):
def __init__(self):
self._authorization_type = None
self._extend_fields = None
self._m_short_name = None
self._user_id = None
@property
def authorization_type(self):
return self._authorization_type
@authorization_type.setter
def authorization_type(self, value):
self._authorization_type = value
@property
def extend_fields(self):
return self._extend_fields
@extend_fields.setter
def extend_fields(self, value):
self._extend_fields = value
@property
def m_short_name(self):
return self._m_short_name
@m_short_name.setter
def m_short_name(self, value):
self._m_short_name = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.authorization_type:
if hasattr(self.authorization_type, 'to_alipay_dict'):
params['authorization_type'] = self.authorization_type.to_alipay_dict()
else:
params['authorization_type'] = self.authorization_type
if self.extend_fields:
if hasattr(self.extend_fields, 'to_alipay_dict'):
params['extend_fields'] = self.extend_fields.to_alipay_dict()
else:
params['extend_fields'] = self.extend_fields
if self.m_short_name:
if hasattr(self.m_short_name, 'to_alipay_dict'):
params['m_short_name'] = self.m_short_name.to_alipay_dict()
else:
params['m_short_name'] = self.m_short_name
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEbppInvoiceAuthUnsignModel()
if 'authorization_type' in d:
o.authorization_type = d['authorization_type']
if 'extend_fields' in d:
o.extend_fields = d['extend_fields']
if 'm_short_name' in d:
o.m_short_name = d['m_short_name']
if 'user_id' in d:
o.user_id = d['user_id']
return o
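# A minimal round-trip sketch (illustrative only; the field values are made
# up): to_alipay_dict() serializes whichever fields are set, and
# from_alipay_dict() rebuilds an equivalent model from that dict.
if __name__ == '__main__':
    model = AlipayEbppInvoiceAuthUnsignModel()
    model.authorization_type = 'AUTO'
    model.user_id = '2088000000000000'
    params = model.to_alipay_dict()
    rebuilt = AlipayEbppInvoiceAuthUnsignModel.from_alipay_dict(params)
    print(params, rebuilt.user_id)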
| python | 2,568 |
import uuid
from opentracing import Format
from thundra import constants
from thundra.config import config_names
from thundra.config.config_provider import ConfigProvider
from thundra.plugins.invocation import invocation_support, invocation_trace_support
from thundra.utils import get_normalized_path
def start_trace(execution_context, tracer, class_name, domain_name, request, request_route_path=None):
propagated_span_context = tracer.extract(Format.HTTP_HEADERS, request.get('headers'))
trace_id = str(uuid.uuid4())
incoming_span_id = None
if propagated_span_context:
trace_id = propagated_span_context.trace_id
incoming_span_id = propagated_span_context.span_id
# Start root span
url_path_depth = ConfigProvider.get(config_names.THUNDRA_TRACE_INTEGRATIONS_HTTP_URL_DEPTH)
normalized_path = get_normalized_path(request.get('path'), url_path_depth)
operation_name = request_route_path or normalized_path
scope = tracer.start_active_span(operation_name=operation_name,
child_of=propagated_span_context,
start_time=execution_context.start_timestamp,
finish_on_close=False,
trace_id=trace_id,
transaction_id=execution_context.transaction_id,
execution_context=execution_context)
root_span = scope.span
# Set root span class and domain names
root_span.class_name = class_name
root_span.domain_name = domain_name
# Add root span tags
execution_context.span_id = root_span.context.span_id
root_span.on_started()
root_span.set_tag(constants.HttpTags['HTTP_METHOD'], request.get('method'))
root_span.set_tag(constants.HttpTags['HTTP_HOST'], request.get('host', ''))
root_span.set_tag(constants.HttpTags['QUERY_PARAMS'], request.get('query_params'))
root_span.set_tag(constants.HttpTags['HTTP_PATH'], request.get('path'))
if not ConfigProvider.get(config_names.THUNDRA_TRACE_REQUEST_SKIP, True):
root_span.set_tag(constants.HttpTags['BODY'], request.get('body'))
execution_context.root_span = root_span
execution_context.scope = scope
execution_context.trace_id = trace_id
if request_route_path:
trigger_operation_name = request.get('host', '') + request_route_path
else:
trigger_operation_name = request.get('headers').get(constants.TRIGGER_RESOURCE_NAME_TAG) or \
request.get('host', '') + normalized_path
execution_context.application_resource_name = request_route_path or normalized_path
invocation_support.set_agent_tag(constants.SpanTags['TRIGGER_OPERATION_NAMES'], [trigger_operation_name])
execution_context.trigger_operation_name = trigger_operation_name
invocation_support.set_agent_tag(constants.HttpTags['HTTP_METHOD'], request.get('method'))
invocation_support.set_agent_tag(constants.SpanTags['TRIGGER_DOMAIN_NAME'], 'API')
invocation_support.set_agent_tag(constants.SpanTags['TRIGGER_CLASS_NAME'], 'HTTP')
if incoming_span_id:
invocation_trace_support.add_incoming_trace_link(incoming_span_id)
def update_application_info(application_info_provider, application_info, app_class_name):
application_info_provider.update({
'applicationName': application_info.get('applicationName', 'thundra-app'),
'applicationClassName': app_class_name,
'applicationDomainName': 'API',
'applicationInstanceId': application_info.get('applicationInstanceId',
str(uuid.uuid4())),
'applicationId': 'python:{}:{}:{}'.format(app_class_name,
application_info.get('applicationRegion', ''),
application_info.get('applicationName',
'thundra-app'))
})
def process_request_route(execution_context, request_route_path, request_host):
if request_route_path and execution_context and execution_context.scope:
trigger_operation_name = request_host + request_route_path
execution_context.scope.span.operation_name = request_route_path
execution_context.trigger_operation_name = trigger_operation_name
execution_context.application_resource_name = request_route_path
invocation_support.set_agent_tag(constants.SpanTags['TRIGGER_OPERATION_NAMES'], [trigger_operation_name])
def finish_trace(execution_context):
root_span = execution_context.root_span
scope = execution_context.scope
try:
root_span.finish(f_time=execution_context.finish_timestamp)
except Exception:
# TODO: handle root span finish errors
pass
finally:
scope.close()
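# A minimal sketch (hypothetical provider; the real agent passes its own
# ApplicationInfoProvider, and the thundra package must be importable):
# update_application_info only needs an object exposing update() plus a
# source dict, so a plain dict subclass is enough to show the derived fields.
if __name__ == '__main__':
    class _DictProvider(dict):
        pass  # dict already supplies update()

    provider = _DictProvider()
    update_application_info(provider,
                            {'applicationName': 'demo-api',
                             'applicationRegion': 'us-west-2'},
                            'Flask')
    print(provider['applicationId'])  # python:Flask:us-west-2:demo-api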
| python | 4,918 |
"""
PRACTICE Exam 3.
This problem provides practice at:
*** FOR and WHILE loops. ***
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Loki Strain.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# Students:
#
# These problems have DIFFICULTY and TIME ratings:
# DIFFICULTY rating: 1 to 10, where:
# 1 is very easy
# 3 is an "easy" Test 2 question.
# 5 is a "typical" Test 2 question.
# 7 is a "hard" Test 2 question.
# 10 is an EXTREMELY hard problem (too hard for a Test 2 question)
#
# TIME ratings: A ROUGH estimate of the number of minutes that we
# would expect a well-prepared student to take on the problem.
#
# IMPORTANT: For ALL the problems in this module,
# if you reach the time estimate and are NOT close to a solution,
# STOP working on that problem and ASK YOUR INSTRUCTOR FOR HELP
# on it, in class or via Piazza.
###############################################################################
import simple_testing as st
import math
def main():
""" Calls the TEST functions in this module. """
run_test_practice_problem3()
# -----------------------------------------------------------------------------
# Students: Some of the testing code below uses SimpleTestCase objects,
# from the imported simple_testing (st) module.
# -----------------------------------------------------------------------------
def run_test_practice_problem3():
""" Tests the practice_problem3 function. """
###########################################################################
# TODO: 2. Implement this TEST function.
# It TESTS the practice_problem3 function defined below.
# Include at least ** 2 ** ADDITIONAL tests beyond those we wrote.
#
# Try to choose tests that might expose errors in your code!
#
# As usual, include both EXPECTED and ACTUAL results in your tests
# and compute the latter BY HAND (not by running your program).
###########################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 3
# TIME ESTIMATE: 10 minutes.
###########################################################################
# -------------------------------------------------------------------------
# 13 tests, plus a 14th after these.
# They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
# practice_problem3(-2, 2, 1.3)
# and compare the returned value against [1, 7] (the correct answer).
# -------------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem3,
[-2, 2, 1.3],
[1, 7]),
st.SimpleTestCase(practice_problem3,
[-5, 3, 0.25],
[-5, 0, 1]),
st.SimpleTestCase(practice_problem3,
[-5, 4, 0.25],
[-5, 0, 1, 2]),
st.SimpleTestCase(practice_problem3,
[-5, 5, 0.25],
[-5, 0, 1, 2, 6]),
st.SimpleTestCase(practice_problem3,
[-5, 6, 0.25],
[-5, 0, 1, 2, 6, 7]),
st.SimpleTestCase(practice_problem3,
[-5, 7, 0.25],
[-5, 0, 1, 2, 6, 7, 8]),
st.SimpleTestCase(practice_problem3,
[-3, 3, -1.0],
[-1, 0, 1]),
st.SimpleTestCase(practice_problem3,
[-3, 4, -1.0],
[-1, 0, 1, 2]),
st.SimpleTestCase(practice_problem3,
[-3, 5, -1.0],
[-1, 0, 1, 2, 3]),
st.SimpleTestCase(practice_problem3,
[-3, 6, -1.0],
[-1, 0, 1, 2, 3, 5]),
st.SimpleTestCase(practice_problem3,
[30, 0, -1000],
[]),
st.SimpleTestCase(practice_problem3,
[100, 5, 1.414],
[139, 183, 516, 560, 849]),
st.SimpleTestCase(practice_problem3,
[0, 1, 1.414213562373],
[286602]),
]
# 14th test:
big_list = []
for k in range(888, 1888):
big_list.append(k)
tests.append(st.SimpleTestCase(practice_problem3,
[888, 1000,
- math.sqrt(2) - 0.00000000001],
big_list))
# -------------------------------------------------------------------------
# Run the 14 tests in the tests list constructed above.
# -------------------------------------------------------------------------
st.SimpleTestCase.run_tests('practice_problem3', tests)
###########################################################################
# TO DO 2 continued: More tests:
# YOU add at least ** 2 ** additional tests here.
#
# You can use the SimpleTestCase class as above, or use
# the ordinary expected/actual way, your choice.
#
# SUGGESTION: Ask an assistant to CHECK your tests to confirm
# that they are adequate tests!
###########################################################################
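    # Two additional hand-computed tests (completing the TO DO above).
    # Expected values follow from the sin/cos table in practice_problem3's
    # docstring: for threshold 1.0 the first three qualifying integers at or
    # after 0 are 1, 7, 13; for a threshold below -sqrt(2), every integer
    # qualifies, so the result is just the next n consecutive integers.
    more_tests = [st.SimpleTestCase(practice_problem3,
                                    [0, 3, 1.0],
                                    [1, 7, 13]),
                  st.SimpleTestCase(practice_problem3,
                                    [-2, 4, -10],
                                    [-2, -1, 0, 1]),
                  ]
    st.SimpleTestCase.run_tests('practice_problem3 (additional)', more_tests)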
def practice_problem3(start, n, threshold):
"""
What comes in:
-- An integer: start
-- An nonnegative integer: n
-- A number: threshold
What goes out: Returns a list of the first n integers,
starting at start, for which the sum of the integer's
sine and cosine is bigger than the given threshold.
Side effects: None.
Examples:
practice_problem3(-2, 2, 1.3) returns [1, 7]
as you can see if you work through this example using
the numbers presented below. (Do so!)
For these examples, the following (and more) numbers
(each is rounded to 2 decimal places for the sake of brevity)
are relevant:
-5: sin = 0.96, cos = 0.28, sum = 1.24
-4: sin = 0.76, cos = -0.65, sum = 0.10
-3: sin = -0.14, cos = -0.99, sum = -1.13
-2: sin = -0.91, cos = -0.42, sum = -1.33
-1: sin = -0.84, cos = 0.54, sum = -0.30
0: sin = 0.00, cos = 1.00, sum = 1.00
1: sin = 0.84, cos = 0.54, sum = 1.38
2: sin = 0.91, cos = -0.42, sum = 0.49
3: sin = 0.14, cos = -0.99, sum = -0.85
4: sin = -0.76, cos = -0.65, sum = -1.41
5: sin = -0.96, cos = 0.28, sum = -0.68
6: sin = -0.28, cos = 0.96, sum = 0.68
7: sin = 0.66, cos = 0.75, sum = 1.41
8: sin = 0.99, cos = -0.15, sum = 0.84
9: sin = 0.41, cos = -0.91, sum = -0.50
10: sin = -0.54, cos = -0.84, sum = -1.38
11: sin = -1.00, cos = 0.00, sum = -1.00
12: sin = -0.54, cos = 0.84, sum = 0.31
13: sin = 0.42, cos = 0.91, sum = 1.33
So if start is -5 and threshold is 0.25 and:
-- n is 3, then this function returns [-5, 0, 1]
because sin(-5) + cos(-5) IS > 0.25 and
sin(-4) + cos(-4) is NOT > 0.25 and
sin(-3) + cos(-3) is NOT > 0.25 and
sin(-2) + cos(-2) is NOT > 0.25 and
sin(-1) + cos(-1) is NOT > 0.25 and
sin(0) + cos(0) IS > 0.25 and
sin(1) + cos(1) IS > 0.25 and
and that makes the required 3 such numbers.
-- n is 4, then this function returns [-5, 0, 1, 2]
-- n is 5, then this function returns [-5, 0, 1, 2, 6]
-- n is 6, then this function returns [-5, 0, 1, 2, 6, 7]
-- n is 7, then this function returns [-5, 0, 1, 2, 6, 7, 8]
while if start is -3 and the threshold is -1.0 and:
-- n is 3, then this function returns [-1, 0, 1]
-- n is 4, then this function returns [-1, 0, 1, 2]
-- n is 5, then this function returns [-1, 0, 1, 2, 3]
-- n is 6, then this function returns [-1, 0, 1, 2, 3, 5]
and if n is 0 (regardless of what start is),
this function returns []
        and if threshold is less than the negative square root of 2,
        this function returns (regardless of what start and n are):
            [start, start + 1, start + 2, ... start + n - 1].
Type hints:
:type start: int
:type n: int
:type threshold: float
"""
###########################################################################
# DONE: 3. Implement and test this function.
# Some tests are already written for you (above),
# but you are required to write ADDITIONAL tests (above).
###########################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 5
# TIME ESTIMATE: < 15 minutes.
###########################################################################
    list1 = []
    k = start
    while len(list1) < n:
        if math.sin(k) + math.cos(k) > threshold:
            list1 = list1 + [k]
        k = k + 1
    return list1
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| python | 10,189 |
def print_full_name(a, b):
    print("Hello " + a + " " + b + "! You just delved into python.")
| python | 90 |
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import NoReverseMatch
class UsingURLPatterns(object):
"""
Isolates URL patterns used during testing on the test class itself.
For example:
class MyTestCase(UsingURLPatterns, TestCase):
urlpatterns = [
...
]
def test_something(self):
...
"""
urls = __name__
def setUp(self):
global urlpatterns
urlpatterns = self.urlpatterns
def tearDown(self):
global urlpatterns
urlpatterns = []
class MockObject(object):
def __init__(self, **kwargs):
self._kwargs = kwargs
for key, val in kwargs.items():
setattr(self, key, val)
def __str__(self):
kwargs_str = ', '.join([
'%s=%s' % (key, value)
for key, value in sorted(self._kwargs.items())
])
return '<MockObject %s>' % kwargs_str
class MockQueryset(object):
def __init__(self, iterable):
self.items = iterable
def get(self, **lookup):
for item in self.items:
if all([
getattr(item, key, None) == value
for key, value in lookup.items()
]):
return item
raise ObjectDoesNotExist()
class BadType(object):
"""
When used as a lookup with a `MockQueryset`, these objects
will raise a `TypeError`, as occurs in Django when making
queryset lookups with an incorrect type for the lookup value.
"""
    def __eq__(self, other):
        raise TypeError()
def mock_reverse(view_name, args=None, kwargs=None, request=None, format=None):
args = args or []
kwargs = kwargs or {}
value = (args + list(kwargs.values()) + ['-'])[0]
prefix = 'http://example.org' if request else ''
suffix = ('.' + format) if (format is not None) else ''
return '%s/%s/%s%s/' % (prefix, view_name, value, suffix)
def fail_reverse(view_name, args=None, kwargs=None, request=None, format=None):
raise NoReverseMatch()
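# A minimal usage sketch (not part of the original module): MockQueryset.get()
# mirrors Django's queryset lookup semantics for these test doubles.
if __name__ == '__main__':
    qs = MockQueryset([MockObject(pk=1, name='a'), MockObject(pk=2, name='b')])
    print(qs.get(pk=2))  # <MockObject name=b, pk=2>
    try:
        qs.get(pk=3)
    except ObjectDoesNotExist:
        print('no match raises ObjectDoesNotExist')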
| python | 2,061 |
"""JustCloud URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from Dashboard import views as dashboard
from Detritus import views as detritus
from Keystone import views as keystone
from Lumiere import views as lumiere
from Parquer import views as parquer
from Jinger import views as jinger
from Others import views as other
urlpatterns = [
path('databaseadmin/', admin.site.urls),
    path('dome/', dashboard.dome),  # page for testing display effects....
path('', dashboard.index, name='index'),
path('register/', dashboard.register, name="register"),
path('admin/', keystone.admin_no, name='admin_on'),
path('login/', dashboard.login, name='login'),
path('logout/', dashboard.logout, name='logout'),
path('home/', keystone.home_no, name='home_no'),
path('onenet/', keystone.onenet, name="onenet"),
path('getwaring/', keystone.getwaring, name="getwaring"),
path('api/onenet/', keystone.onenetDataIn, name="oneNETDataIn"),
path('api/test/', keystone.onenetDataTest, name="apiTest"),
path('admin/<username>/', keystone.mainAdmin, name='mainAdmin'),
path('admin/<username>/systemcreate/', keystone.systemCreate, name='systemCreate'),
path('admin/<username>/systemremove/', keystone.systemRemove, name='systemRemove'),
path('admin/<username>/device/', keystone.deviceAdmin, name='deviceAdmin'),
path('admin/<username>/device/deviceremove/', keystone.deviceRemove, name='deviceRemove'),
path('admin/<username>/device/deviceadd/', keystone.deviceAdd, name='deviceAdd'),
path('home/<username>/', keystone.mainHome, name='mainHome'),
path('home/<username>/auth/', keystone.authHome, name='authHome'),
path('home/<username>/center/', keystone.centerHome, name='centerHome'),
path('home/<username>/domainchange/', keystone.domainChange, name='domainChange'),
path('home/<username>/useradd/', keystone.userAdd, name='userAdd'),
path('home/<username>/center/userchange/', keystone.userChange, name='userChange'),
path('home/<username>/userremove/', keystone.userRemove, name='userRemove'),
path('home/<username>/auth/adminremove/', keystone.adminRemove, name='adminRemove'),
path('home/<username>/auth/adminadd/', keystone.adminAdd, name='adminAdd'),
path('home/<username>/center/passwordchange/', keystone.passwordChange, name='passwordChange'),
path('system/<username>/Jinger/<int:sid>/', jinger.systemMain, name='JingerMain'),
path('system/<username>/Jinger/<int:sid>/analy/', jinger.systemAnaly, name='JingerAnaly'),
path('system/<username>/Jinger/<int:sid>/analy/waringremove/', jinger.waringRemove, name='waringRemove'),
path('system/<username>/Jinger/<int:sid>/device/', jinger.systemDevice, name='systemDevice'),
path('system/<username>/Jinger/<int:sid>/device/deviceremove/', jinger.deviceRemove, name='deviceRemove'),
path('system/<username>/Jinger/<int:sid>/device/deviceadd/', jinger.deviceAdd, name='deviceAdd'),
path('system/<username>/Jinger/<int:sid>/device/<int:did>/', jinger.deviceDetail, name='deviceDetail'),
path('system/<username>/Jinger/<int:sid>/device/<int:did>/getnewdevicemap/', jinger.newDeviceMap,
name='newDeviceMap'),
path('system/<username>/Jinger/<int:sid>/device/<int:did>/waringremove/', jinger.waringRemove, name='waringRemove'),
path('system/<username>/Jinger/<int:sid>/type/', jinger.dataType, name='dataType'),
path('system/<username>/Jinger/<int:sid>/push/', jinger.systemPush, name='systemPush'),
path('system/<username>/Jinger/<int:sid>/push/pushadd/', jinger.pushAdd, name='pushAdd'),
path('system/<username>/Jinger/<int:sid>/push/pushaddall/', jinger.pushAddAll, name='pushAddAll'),
path('system/<username>/Jinger/<int:sid>/pull/', jinger.systemPull, name='systemPull'),
path('system/<username>/Detritus/<int:sid>/', detritus.systemMain, name='DetritusMain'),
path('system/<username>/Detritus/<int:sid>/analy/', detritus.systemAnaly, name='DetritusAnaly'),
path('system/<username>/Detritus/<int:sid>/analy/waringremove/', detritus.waringRemove, name='waringRemove'),
path('system/<username>/Detritus/<int:sid>/device/', detritus.systemDevice, name='systemDevice'),
path('system/<username>/Detritus/<int:sid>/device/deviceremove/', detritus.deviceRemove, name='deviceRemove'),
path('system/<username>/Detritus/<int:sid>/device/deviceadd/', detritus.deviceAdd, name='deviceAdd'),
path('system/<username>/Detritus/<int:sid>/device/<int:did>/', detritus.deviceDetail, name='deviceDetail'),
path('system/<username>/Detritus/<int:sid>/device/<int:did>/getnewdevicemap/', detritus.newDeviceMap,
name='newDeviceMap'),
path('system/<username>/Detritus/<int:sid>/device/<int:did>/waringremove/', detritus.waringRemove,
name='waringRemove'),
path('system/<username>/Detritus/<int:sid>/type/', detritus.dataType, name='dataType'),
path('system/<username>/Detritus/<int:sid>/push/', detritus.systemPush, name='systemPush'),
path('system/<username>/Detritus/<int:sid>/push/pushadd/', detritus.pushAdd, name='pushAdd'),
path('system/<username>/Detritus/<int:sid>/push/pushaddall/', detritus.pushAddAll, name='pushAddAll'),
path('system/<username>/Detritus/<int:sid>/pull/', detritus.systemPull, name='systemPull'),
path('system/<username>/Lumiere/<int:sid>/', lumiere.systemMain, name='LumiereMain'),
path('system/<username>/Lumiere/<int:sid>/analy/', lumiere.systemAnaly, name='LumiereAnaly'),
path('system/<username>/Lumiere/<int:sid>/analy/waringremove/', lumiere.waringRemove, name='waringRemove'),
path('system/<username>/Lumiere/<int:sid>/device/', lumiere.systemDevice, name='systemDevice'),
path('system/<username>/Lumiere/<int:sid>/device/deviceremove/', lumiere.deviceRemove, name='deviceRemove'),
path('system/<username>/Lumiere/<int:sid>/device/deviceadd/', lumiere.deviceAdd, name='deviceAdd'),
path('system/<username>/Lumiere/<int:sid>/device/<int:did>/', lumiere.deviceDetail, name='deviceDetail'),
path('system/<username>/Lumiere/<int:sid>/device/<int:did>/getnewdevicemap/', lumiere.newDeviceMap,
name='newDeviceMap'),
path('system/<username>/Lumiere/<int:sid>/device/<int:did>/waringremove/', lumiere.waringRemove,
name='waringRemove'),
path('system/<username>/Lumiere/<int:sid>/type/', lumiere.dataType, name='dataType'),
path('system/<username>/Lumiere/<int:sid>/push/', lumiere.systemPush, name='systemPush'),
path('system/<username>/Lumiere/<int:sid>/push/pushadd/', lumiere.pushAdd, name='pushAdd'),
path('system/<username>/Lumiere/<int:sid>/push/pushaddall/', lumiere.pushAddAll, name='pushAddAll'),
path('system/<username>/Lumiere/<int:sid>/pull/', lumiere.systemPull, name='systemPull'),
path('system/<username>/Parquer/<int:sid>/', parquer.systemMain, name='ParquerMain'),
path('system/<username>/Parquer/<int:sid>/analy/', parquer.systemAnaly, name='ParquerAnaly'),
path('system/<username>/Parquer/<int:sid>/analy/waringremove/', parquer.waringRemove, name='waringRemove'),
path('system/<username>/Parquer/<int:sid>/analy/getfreecount/', parquer.freeCount, name='freeCount'),
path('system/<username>/Parquer/<int:sid>/device/', parquer.systemDevice, name='systemDevice'),
path('system/<username>/Parquer/<int:sid>/device/deviceremove/', parquer.deviceRemove, name='deviceRemove'),
path('system/<username>/Parquer/<int:sid>/device/deviceadd/', parquer.deviceAdd, name='deviceAdd'),
path('system/<username>/Parquer/<int:sid>/device/<int:did>/', parquer.deviceDetail, name='deviceDetail'),
path('system/<username>/Parquer/<int:sid>/device/<int:did>/getnewdevicemap/', parquer.newDeviceMap,
name='newDeviceMap'),
path('system/<username>/Parquer/<int:sid>/device/<int:did>/waringremove/', parquer.waringRemove,
name='waringRemove'),
path('system/<username>/Parquer/<int:sid>/type/', parquer.dataType, name='dataType'),
path('system/<username>/Parquer/<int:sid>/push/', parquer.systemPush, name='systemPush'),
path('system/<username>/Parquer/<int:sid>/push/pushadd/', parquer.pushAdd, name='pushAdd'),
path('system/<username>/Parquer/<int:sid>/push/pushaddall/', parquer.pushAddAll, name='pushAddAll'),
path('system/<username>/Parquer/<int:sid>/pull/', parquer.systemPull, name='systemPull'),
path('system/<username>/Others/<int:sid>/', other.systemMain, name='JingerMain'),
path('system/<username>/Others/<int:sid>/analy/', other.systemAnaly, name='JingerAnaly'),
path('system/<username>/Others/<int:sid>/analy/waringremove/', other.waringRemove, name='waringRemove'),
path('system/<username>/Others/<int:sid>/device/', other.systemDevice, name='systemDevice'),
path('system/<username>/Others/<int:sid>/device/deviceremove/', other.deviceRemove, name='deviceRemove'),
path('system/<username>/Others/<int:sid>/device/deviceadd/', other.deviceAdd, name='deviceAdd'),
path('system/<username>/Others/<int:sid>/device/<int:did>/', other.deviceDetail, name='deviceDetail'),
path('system/<username>/Others/<int:sid>/device/<int:did>/getnewdevicemap/', other.newDeviceMap,
name='newDeviceMap'),
path('system/<username>/Others/<int:sid>/device/<int:did>/waringremove/', other.waringRemove, name='waringRemove'),
path('system/<username>/Others/<int:sid>/type/', other.dataType, name='dataType'),
path('system/<username>/Others/<int:sid>/push/', other.systemPush, name='systemPush'),
path('system/<username>/Others/<int:sid>/push/pushadd/', other.pushAdd, name='pushAdd'),
path('system/<username>/Others/<int:sid>/push/pushaddall/', other.pushAddAll, name='pushAddAll'),
path('system/<username>/Others/<int:sid>/pull/', other.systemPull, name='systemPull'),
]
| python | 10,364 |
# -*- coding: utf-8 -*-
"""Cisco Identity Services Engine SMSProvider API wrapper.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
get_next_page,
)
class SmsProvider(object):
"""Identity Services Engine SMSProvider API (version: 3.0.0).
Wraps the Identity Services Engine SMSProvider
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new SmsProvider
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the Identity Services Engine service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(SmsProvider, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def get_sms_provider(self,
filter=None,
filter_type=None,
page=None,
size=None,
sortasc=None,
sortdsc=None,
headers=None,
**query_parameters):
"""This API allows the client to get all the SMS providers.
Filter: [name] To search resources by using toDate
        column, follow the format: DD-MON-YY
(Example:13-SEP-18) Day or Year:GET
/ers/config/guestuser/?filter=toDate.CONTAINS.13
Month:GET
/ers/config/guestuser/?filter=toDate.CONTAINS.SEP
Date:GET
/ers/config/guestuser/?filter=toDate.CONTAINS.13-SEP-18
Sorting: [name, description].
Args:
page(int): page query parameter. Page number.
size(int): size query parameter. Number of objects
returned per page.
sortasc(basestring): sortasc query parameter. sort asc.
sortdsc(basestring): sortdsc query parameter. sort desc.
filter(basestring, list, set, tuple): filter query
parameter. **Simple
filtering** should be available through
the filter query string parameter. The
structure of a filter is a triplet of
field operator and value separated with
dots. More than one filter can be sent.
The logical operator common to ALL
filter criteria will be by default AND,
and can be changed by using the
"filterType=or" query string parameter.
Each resource Data model description
should specify if an attribute is a
filtered field. (Operator:
Description),
(EQ: Equals), (NEQ: Not
Equals), (GT: Greater
                Than), (LT: Less Than),
(STARTSW: Starts With),
(NSTARTSW: Not Starts With),
(ENDSW: Ends With),
(NENDSW: Not Ends With),
(CONTAINS: Contains),
(NCONTAINS: Not Contains),
.
filter_type(basestring): filterType query parameter. The
logical operator common to ALL filter
criteria will be by default AND, and can
                be changed by using the "filterType=or" query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
if 'ERS-Media-Type' in headers:
check_type(headers.get('ERS-Media-Type'),
basestring)
if 'X-CSRF-TOKEN' in headers:
check_type(headers.get('X-CSRF-TOKEN'),
basestring)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(page, (int, basestring, list))
check_type(size, (int, basestring, list))
check_type(sortasc, basestring)
check_type(sortdsc, basestring)
check_type(filter, (basestring, list, set, tuple))
check_type(filter_type, basestring)
_params = {
'page':
page,
'size':
size,
'sortasc':
sortasc,
'sortdsc':
sortdsc,
'filter':
filter,
'filterType':
filter_type,
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
e_url = ('/ers/config/smsprovider')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_daac88943a5cd2bd745c483448e231_v3_0_0', _api_response)
def get_all(self,
filter=None,
filter_type=None,
page=None,
size=None,
sortasc=None,
sortdsc=None,
headers=None,
**query_parameters):
"""Alias for `get_sms_provider <#ciscoisesdk.
api.v3_0_0.sms_provider.
SmsProvider.get_sms_provider>`_
"""
return self.get_sms_provider(
filter=filter,
filter_type=filter_type,
page=page,
size=size,
sortasc=sortasc,
sortdsc=sortdsc,
headers=headers,
**query_parameters
)
def get_sms_provider_generator(self,
filter=None,
filter_type=None,
page=None,
size=None,
sortasc=None,
sortdsc=None,
headers=None,
**query_parameters):
"""This API allows the client to get all the SMS providers.
Filter: [name] To search resources by using toDate
        column, follow the format: DD-MON-YY
(Example:13-SEP-18) Day or Year:GET
/ers/config/guestuser/?filter=toDate.CONTAINS.13
Month:GET
/ers/config/guestuser/?filter=toDate.CONTAINS.SEP
Date:GET
/ers/config/guestuser/?filter=toDate.CONTAINS.13-SEP-18
Sorting: [name, description].
Args:
page(int): page query parameter. Page number.
size(int): size query parameter. Number of objects
returned per page.
sortasc(basestring): sortasc query parameter. sort asc.
sortdsc(basestring): sortdsc query parameter. sort desc.
filter(basestring, list, set, tuple): filter query
parameter. **Simple
filtering** should be available through
the filter query string parameter. The
structure of a filter is a triplet of
field operator and value separated with
dots. More than one filter can be sent.
The logical operator common to ALL
filter criteria will be by default AND,
and can be changed by using the
"filterType=or" query string parameter.
Each resource Data model description
should specify if an attribute is a
filtered field. (Operator:
Description),
(EQ: Equals), (NEQ: Not
Equals), (GT: Greater
                Than), (LT: Less Than),
(STARTSW: Starts With),
(NSTARTSW: Not Starts With),
(ENDSW: Ends With),
(NENDSW: Not Ends With),
(CONTAINS: Contains),
(NCONTAINS: Not Contains),
.
filter_type(basestring): filterType query parameter. The
logical operator common to ALL filter
criteria will be by default AND, and can
                be changed by using the "filterType=or" query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
Generator: A generator object containing the following object.
+ RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
yield from get_next_page(
self.get_sms_provider, dict(
filter=filter,
filter_type=filter_type,
page=page,
size=size,
sortasc=sortasc,
sortdsc=sortdsc,
headers=headers,
**query_parameters
),
access_next_list=["SearchResult", "nextPage", "href"],
access_resource_list=["SearchResult", "resources"])
def get_all_generator(self,
filter=None,
filter_type=None,
page=None,
size=None,
sortasc=None,
sortdsc=None,
headers=None,
**query_parameters):
"""Alias for `get_sms_provider_generator <#ciscoisesdk.
api.v3_0_0.sms_provider.
SmsProvider.get_sms_provider_generator>`_
"""
yield from get_next_page(
self.get_sms_provider, dict(
filter=filter,
filter_type=filter_type,
page=page,
size=size,
sortasc=sortasc,
sortdsc=sortdsc,
headers=headers,
**query_parameters
),
access_next_list=["SearchResult", "nextPage", "href"],
access_resource_list=["SearchResult", "resources"])
def get_version(self,
headers=None,
**query_parameters):
"""This API helps to retrieve the version information related to
the SMS provider.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
e_url = ('/ers/config/smsprovider/versioninfo')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e20e5400a53280d52487ecd6_v3_0_0', _api_response)
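# A minimal usage sketch (assumptions: the documented top-level
# IdentityServicesEngineAPI client, placeholder credentials and URL;
# adjust both for a real deployment):
if __name__ == '__main__':
    from ciscoisesdk import IdentityServicesEngineAPI
    api = IdentityServicesEngineAPI(username='admin',
                                    password='C1sco12345',
                                    base_url='https://ise.example.com')
    # Resources live under SearchResult, matching the generator's
    # access_resource_list above.
    for page in api.sms_provider.get_sms_provider_generator(size=20):
        for resource in page.response.SearchResult.resources:
            print(resource.id, resource.name)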
| python | 16,117 |
import arrow
import asyncpg
import dbl
import discord
from discord.ext import commands
class TopGG(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.dblpy = dbl.DBLClient(self.bot, self.bot.dbl_keys[0],
webhook_path="/updoot_topgg",
webhook_auth=self.bot.dbl_keys[1],
webhook_port=3209, autopost=True)
self.db_vb = self.bot.db_villager_bot
def cog_unload(self):
self.bot.loop.create_task(self.dblpy.close())
@commands.Cog.listener()
async def on_dbl_test(self, data):
print("\u001b[35m DBL WEBHOOK TEST \u001b[0m")
channel = self.bot.get_channel(718983583779520540)
await channel.send(embed=discord.Embed(color=await self.bot.cc(), description="DBL WEBHOOK TEST"))
@commands.Cog.listener()
async def on_dbl_vote(self, data):
user_id = int(data["user"])
print(f"\u001b[32;1m {user_id} VOTED ON TOP.GG \u001b[0m")
#
user = self.bot.get_user(user_id)
#
amount = 32
prem_minutes = 15
if await self.dblpy.get_weekend_status():
amount *= 2
prem_minutes *= 2
#
u_db_bal = await self.db_vb.fetchrow("SELECT amount FROM currency WHERE id = $1", user_id)
#
if u_db_bal is not None:
if user is not None:
msg = f"Thank you for voting! You've received `{amount} emeralds` in Villager Bot " \
f"and `{prem_minutes} minutes` of Hypixel Stats **Premium**!"
await user.send(embed=discord.Embed(color=await self.bot.cc(user_id), description=msg))
#
async with self.db_vb.acquire() as con:
await con.execute("UPDATE currency SET amount = $1 WHERE id = $2", u_db_bal[0] + amount, user_id)
else:
if user is not None:
msg = f"Thank you for voting! You've received `{prem_minutes} minutes` of Hypixel Stats **Premium**!"
await user.send(embed=discord.Embed(color=await self.bot.cc(user_id), description=msg))
#
timestamp_ends = arrow.utcnow().shift(minutes=+prem_minutes).timestamp
await self.db.add_premium(user_id, timestamp_ends)
def setup(bot):
bot.add_cog(TopGG(bot))
| python | 2,380 |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import ultracart
from ultracart.rest import ApiException
from ultracart.models.item_content_assignment import ItemContentAssignment
class TestItemContentAssignment(unittest.TestCase):
""" ItemContentAssignment unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testItemContentAssignment(self):
"""
Test ItemContentAssignment
"""
# FIXME: construct object with mandatory attributes with example values
#model = ultracart.models.item_content_assignment.ItemContentAssignment()
pass
if __name__ == '__main__':
unittest.main()
| python | 936 |
'''
A module for shelling out
Keep in mind that this module is insecure, in that it can give whomever has
access to the master root execution access to all salt minions
'''
# Import python libs
import logging
import os
import shutil
import subprocess
import functools
import sys
import json
import yaml
import traceback
# Import salt libs
import salt.utils
import salt.utils.timed_subprocess
from salt.exceptions import CommandExecutionError
import salt.exceptions
import salt.grains.extra
# Only available on POSIX systems, nonfatal on windows
try:
import pwd
import grp
except ImportError:
pass
# Set up logging
log = logging.getLogger(__name__)
DEFAULT_SHELL = salt.grains.extra.shell()['shell']
def __virtual__():
'''
Overwriting the cmd python module makes debugging modules
with pdb a bit harder so lets do it this way instead.
'''
return 'cmd'
def _chugid(runas):
uinfo = pwd.getpwnam(runas)
supgroups = [g.gr_gid for g in grp.getgrall()
if uinfo.pw_name in g.gr_mem and g.gr_gid != uinfo.pw_gid]
# No logging can happen on this function
#
# 08:46:32,161 [salt.loaded.int.module.cmdmod:276 ][DEBUG ] stderr: Traceback (most recent call last):
# File "/usr/lib/python2.7/logging/__init__.py", line 870, in emit
# self.flush()
# File "/usr/lib/python2.7/logging/__init__.py", line 832, in flush
# self.stream.flush()
# IOError: [Errno 9] Bad file descriptor
# Logged from file cmdmod.py, line 59
# 08:46:17,481 [salt.loaded.int.module.cmdmod:59 ][DEBUG ] Switching user 0 -> 1008 and group 0 -> 1012 if needed
#
# apparently because we closed fd's on Popen, though if not closed, output
# would also go to its stderr
if os.getgid() != uinfo.pw_gid:
try:
os.setgid(uinfo.pw_gid)
except OSError as err:
raise CommandExecutionError(
'Failed to change from gid {0} to {1}. Error: {2}'.format(
os.getgid(), uinfo.pw_gid, err
)
)
# Set supplemental groups
if sorted(os.getgroups()) != sorted(supgroups):
try:
os.setgroups(supgroups)
except OSError as err:
raise CommandExecutionError(
'Failed to set supplemental groups to {0}. Error: {1}'.format(
supgroups, err
)
)
if os.getuid() != uinfo.pw_uid:
try:
os.setuid(uinfo.pw_uid)
except OSError as err:
raise CommandExecutionError(
'Failed to change from uid {0} to {1}. Error: {2}'.format(
os.getuid(), uinfo.pw_uid, err
)
)
def _chugid_and_umask(runas, umask):
'''
Helper method for for subprocess.Popen to initialise uid/gid and umask
for the new process.
'''
if runas is not None:
_chugid(runas)
if umask is not None:
os.umask(umask)
def _render_cmd(cmd, cwd, template):
'''
If template is a valid template engine, process the cmd and cwd through
that engine.
'''
if not template:
return (cmd, cwd)
# render the path as a template using path_template_engine as the engine
if template not in salt.utils.templates.TEMPLATE_REGISTRY:
raise CommandExecutionError(
'Attempted to render file paths with unavailable engine '
'{0}'.format(template)
)
kwargs = {}
kwargs['salt'] = __salt__
kwargs['pillar'] = __pillar__
kwargs['grains'] = __grains__
kwargs['opts'] = __opts__
kwargs['env'] = 'base'
def _render(contents):
# write out path to temp file
tmp_path_fn = salt.utils.mkstemp()
with salt.utils.fopen(tmp_path_fn, 'w+') as fp_:
fp_.write(contents)
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
tmp_path_fn,
to_str=True,
**kwargs
)
salt.utils.safe_rm(tmp_path_fn)
if not data['result']:
# Failed to render the template
raise CommandExecutionError(
'Failed to cmd with error: {0}'.format(
data['data']
)
)
else:
return data['data']
cmd = _render(cmd)
cwd = _render(cwd)
return (cmd, cwd)
def _run(cmd,
cwd=None,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
quiet=False,
runas=None,
shell=DEFAULT_SHELL,
env=(),
clean_env=False,
rstrip=True,
template=None,
umask=None,
timeout=None):
'''
Do the DRY thing and only call subprocess.Popen() once
'''
# Set the default working directory to the home directory
# of the user salt-minion is running as. Default: /root
if not cwd:
cwd = os.path.expanduser('~{0}'.format('' if not runas else runas))
# make sure we can access the cwd
# when run from sudo or another environment where the euid is
# changed ~ will expand to the home of the original uid and
# the euid might not have access to it. See issue #1844
if not os.access(cwd, os.R_OK):
cwd = '/'
if salt.utils.is_windows():
cwd = os.tempnam()[:3]
if not salt.utils.is_windows():
if not os.path.isfile(shell) or not os.access(shell, os.X_OK):
msg = 'The shell {0} is not available'.format(shell)
raise CommandExecutionError(msg)
if shell.lower().strip() == 'powershell':
# If we were called by script(), then fakeout the Windows
# shell to run a Powershell script.
# Else just run a Powershell command.
stack = traceback.extract_stack(limit=2)
# extract_stack() returns a list of tuples.
            # The last item in the list [-1] is the current method.
# The third item[2] in each tuple is the name of that method.
if stack[-2][2] == 'script':
cmd = 'Powershell -File ' + cmd
else:
cmd = 'Powershell ' + cmd
# munge the cmd and cwd through the template
(cmd, cwd) = _render_cmd(cmd, cwd, template)
ret = {}
if not env:
env = {}
elif isinstance(env, basestring):
try:
env = yaml.safe_load(env)
except yaml.parser.ParserError as err:
log.error(err)
env = {}
if not isinstance(env, dict):
log.error('Invalid input: {0}, must be a dict or '
'string - yaml represented dict'.format(env))
env = {}
if runas and salt.utils.is_windows():
# TODO: Figure out the proper way to do this in windows
msg = 'Sorry, {0} does not support runas functionality'
raise CommandExecutionError(msg.format(__grains__['os']))
if runas:
# Save the original command before munging it
try:
pwd.getpwnam(runas)
except KeyError:
msg = 'User \'{0}\' is not available'.format(runas)
raise CommandExecutionError(msg)
try:
# Getting the environment for the runas user
# There must be a better way to do this.
py_code = 'import os, json;' \
'print(json.dumps(os.environ.__dict__))'
if __grains__['os'] in ['MacOS', 'Darwin']:
env_cmd = ('sudo -i -u {1} -- "{2}"'
).format(shell, runas, sys.executable)
elif __grains__['os'] in ['FreeBSD']:
env_cmd = ('su - {1} -c "{0} -c \'{2}\'"'
).format(shell, runas, sys.executable)
else:
env_cmd = ('su -s {0} - {1} -c "{2}"'
).format(shell, runas, sys.executable)
env_json = subprocess.Popen(
env_cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE
).communicate(py_code)[0]
env_json = (filter(lambda x: x.startswith('{') and x.endswith('}'),
env_json.splitlines()) or ['{}']).pop()
env_runas = json.loads(env_json).get('data', {})
env_runas.update(env)
env = env_runas
except ValueError:
msg = 'Environment could not be retrieved for User \'{0}\''.format(runas)
raise CommandExecutionError(msg)
if not salt.utils.is_true(quiet):
# Put the most common case first
log.info(
'Executing command {0!r} {1}in directory {2!r}'.format(
cmd, 'as user {0!r} '.format(runas) if runas else '', cwd
)
)
if not salt.utils.is_windows():
# Default to C!
# Salt only knows how to parse English words
# Don't override if the user has passed LC_ALL
env.setdefault('LC_ALL', 'C')
else:
# On Windows set the codepage to US English.
cmd = 'chcp 437 > nul & ' + cmd
if clean_env:
run_env = env
else:
run_env = os.environ.copy()
run_env.update(env)
kwargs = {'cwd': cwd,
'shell': True,
'env': run_env,
'stdin': str(stdin) if stdin is not None else stdin,
'stdout': stdout,
'stderr': stderr}
if umask:
try:
_umask = int(str(umask).lstrip('0'), 8)
if not _umask:
raise ValueError('Zero umask not allowed.')
except ValueError:
msg = 'Invalid umask: \'{0}\''.format(umask)
raise CommandExecutionError(msg)
else:
_umask = None
if runas or umask:
kwargs['preexec_fn'] = functools.partial(
_chugid_and_umask,
runas,
_umask)
if not salt.utils.is_windows():
# close_fds is not supported on Windows platforms if you redirect
# stdin/stdout/stderr
kwargs['executable'] = shell
kwargs['close_fds'] = True
# This is where the magic happens
proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs)
try:
proc.wait(timeout)
    except salt.exceptions.TimedProcTimeoutError as e:
ret['stdout'] = e.message
ret['stderr'] = ''
ret['pid'] = proc.process.pid
# ok return code for timeouts?
ret['retcode'] = 1
return ret
out, err = proc.stdout, proc.stderr
if rstrip:
if out is not None:
out = out.rstrip()
if err is not None:
err = err.rstrip()
ret['stdout'] = out
ret['stderr'] = err
ret['pid'] = proc.process.pid
ret['retcode'] = proc.process.returncode
try:
__context__['retcode'] = ret['retcode']
except NameError:
# Ignore the context error during grain generation
pass
return ret
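# Illustrative only (not part of the module API): the wrappers below all
# consume _run()'s return dict, whose keys are 'stdout', 'stderr', 'pid'
# and 'retcode', e.g.:
#
#     ret = _run('ls -l /tmp', stderr=subprocess.STDOUT, timeout=10)
#     if ret['retcode'] != 0:
#         log.error(ret['stderr'])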
def _run_quiet(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env=(),
template=None,
umask=None,
timeout=None):
'''
Helper for running commands quietly for minion startup
'''
return _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
stderr=subprocess.STDOUT,
quiet=True,
shell=shell,
env=env,
template=template,
umask=umask,
timeout=timeout)['stdout']
def _run_all_quiet(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env=(),
template=None,
umask=None,
timeout=None):
'''
Helper for running commands quietly for minion startup.
Returns a dict of return data
'''
return _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
shell=shell,
env=env,
quiet=True,
template=template,
umask=umask,
timeout=timeout)
def run(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env=(),
clean_env=False,
template=None,
rstrip=True,
umask=None,
quiet=False,
timeout=None,
**kwargs):
'''
Execute the passed command and return the output as a string
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
CLI Example:
.. code-block:: bash
salt '*' cmd.run "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
Specify an alternate shell with the shell parameter:
.. code-block:: bash
salt '*' cmd.run "Get-ChildItem C:\\ " shell='powershell'
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input:
.. code-block:: bash
salt '*' cmd.run "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
out = _run(cmd,
runas=runas,
shell=shell,
cwd=cwd,
stdin=stdin,
stderr=subprocess.STDOUT,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
quiet=quiet,
timeout=timeout)['stdout']
if not quiet:
log.debug('output: {0}'.format(out))
return out
def run_stdout(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env=(),
clean_env=False,
template=None,
rstrip=True,
umask=None,
quiet=False,
timeout=None,
**kwargs):
'''
Execute a command, and only return the standard out
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
CLI Example:
.. code-block:: bash
salt '*' cmd.run_stdout "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_stdout template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input:
.. code-block:: bash
salt '*' cmd.run_stdout "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
stdout = _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
shell=shell,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
quiet=quiet,
timeout=timeout)["stdout"]
if not quiet:
log.debug('stdout: {0}'.format(stdout))
return stdout
def run_stderr(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env=(),
clean_env=False,
template=None,
rstrip=True,
umask=None,
quiet=False,
timeout=None,
**kwargs):
'''
Execute a command and only return the standard error
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
CLI Example:
.. code-block:: bash
salt '*' cmd.run_stderr "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_stderr template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input:
.. code-block:: bash
salt '*' cmd.run_stderr "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
stderr = _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
shell=shell,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
quiet=quiet,
timeout=timeout)["stderr"]
if not quiet:
log.debug('stderr: {0}'.format(stderr))
return stderr
def run_all(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env=(),
clean_env=False,
template=None,
rstrip=True,
umask=None,
quiet=False,
timeout=None,
**kwargs):
'''
Execute the passed command and return a dict of return data
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
CLI Example:
.. code-block:: bash
salt '*' cmd.run_all "ls -l | awk '/foo/{print \\$2}'"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.run_all template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input:
.. code-block:: bash
salt '*' cmd.run_all "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
ret = _run(cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
shell=shell,
env=env,
clean_env=clean_env,
template=template,
rstrip=rstrip,
umask=umask,
quiet=quiet,
timeout=timeout)
if not quiet:
if ret['retcode'] != 0:
rcode = ret['retcode']
msg = 'Command \'{0}\' failed with return code: {1}'
log.error(msg.format(cmd, rcode))
# Don't log a blank line if there is no stderr or stdout
if ret['stdout']:
log.error('stdout: {0}'.format(ret['stdout']))
if ret['stderr']:
log.error('stderr: {0}'.format(ret['stderr']))
else:
# No need to always log output on success to the logs
if ret['stdout']:
log.debug('stdout: {0}'.format(ret['stdout']))
if ret['stderr']:
log.debug('stderr: {0}'.format(ret['stderr']))
return ret
def retcode(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env=(),
clean_env=False,
template=None,
umask=None,
quiet=False,
timeout=None):
'''
Execute a shell command and return the command's return code.
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
CLI Example:
.. code-block:: bash
salt '*' cmd.retcode "file /bin/bash"
The template arg can be set to 'jinja' or another supported template
engine to render the command arguments before execution.
For example:
.. code-block:: bash
salt '*' cmd.retcode template=jinja "file {{grains.pythonpath[0]}}/python"
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input:
.. code-block:: bash
salt '*' cmd.retcode "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
return _run(
cmd,
runas=runas,
cwd=cwd,
stdin=stdin,
shell=shell,
env=env,
clean_env=clean_env,
template=template,
umask=umask,
quiet=quiet,
timeout=timeout)['retcode']
def script(
source,
args=None,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env='base',
template='jinja',
umask=None,
timeout=None,
**kwargs):
'''
Download a script from a remote location and execute the script locally.
The script can be located on the salt master file server or on an HTTP/FTP
server.
The script will be executed directly, so it can be written in any available
programming language.
    The script can also be formatted as a template; the default is jinja.
Arguments for the script can be specified as well.
CLI Example:
.. code-block:: bash
salt '*' cmd.script salt://scripts/runme.sh
salt '*' cmd.script salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
salt '*' cmd.script salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input:
.. code-block:: bash
salt '*' cmd.script salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
if not salt.utils.is_windows():
path = salt.utils.mkstemp(dir=cwd)
else:
path = __salt__['cp.cache_file'](source, env)
if not path:
return {'pid': 0,
'retcode': 1,
'stdout': '',
'stderr': '',
'cache_error': True}
if template:
__salt__['cp.get_template'](source, path, template, env, **kwargs)
else:
if not salt.utils.is_windows():
fn_ = __salt__['cp.cache_file'](source, env)
if not fn_:
return {'pid': 0,
'retcode': 1,
'stdout': '',
'stderr': '',
'cache_error': True}
shutil.copyfile(fn_, path)
if not salt.utils.is_windows():
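        # 320 decimal == 0o500, i.e. read/execute permission for the owner only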
os.chmod(path, 320)
os.chown(path, __salt__['file.user_to_uid'](runas), -1)
ret = _run(
path + ' ' + str(args) if args else path,
cwd=cwd,
stdin=stdin,
quiet=kwargs.get('quiet', False),
runas=runas,
shell=shell,
umask=umask,
timeout=timeout)
os.remove(path)
return ret
def script_retcode(
source,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
env='base',
template='jinja',
umask=None,
timeout=None,
**kwargs):
'''
Download a script from a remote location and execute the script locally.
The script can be located on the salt master file server or on an HTTP/FTP
server.
The script will be executed directly, so it can be written in any available
programming language.
    The script can also be formatted as a template; the default is jinja.
Only evaluate the script return code and do not block for terminal output
CLI Example:
.. code-block:: bash
salt '*' cmd.script_retcode salt://scripts/runme.sh
A string of standard input can be specified for the command to be run using
the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input:
.. code-block:: bash
salt '*' cmd.script_retcode salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
return script(
source=source,
cwd=cwd,
stdin=stdin,
runas=runas,
shell=shell,
env=env,
template=template,
umask=umask,
timeout=timeout,
**kwargs)['retcode']
def which(cmd):
'''
Returns the path of an executable available on the minion, None otherwise
CLI Example:
.. code-block:: bash
salt '*' cmd.which cat
'''
return salt.utils.which(cmd)
def which_bin(cmds):
'''
Returns the first command found in a list of commands
CLI Example:
.. code-block:: bash
salt '*' cmd.which_bin '[pip2, pip, pip-python]'
'''
return salt.utils.which_bin(cmds)
def has_exec(cmd):
'''
Returns true if the executable is available on the minion, false otherwise
CLI Example:
.. code-block:: bash
salt '*' cmd.has_exec cat
'''
return bool(which(cmd))
def exec_code(lang, code, cwd=None):
'''
    Pass in two strings: the first names the executable language (e.g.
    python2, python3, ruby, perl, lua) and the second contains the code
    you wish to execute. The stdout and stderr will be returned.
CLI Example:
.. code-block:: bash
salt '*' cmd.exec_code ruby 'puts "cheese"'
'''
codefile = salt.utils.mkstemp()
with salt.utils.fopen(codefile, 'w+') as fp_:
fp_.write(code)
cmd = '{0} {1}'.format(lang, codefile)
ret = run(cmd, cwd=cwd)
os.remove(codefile)
return ret
| python | 26,649 |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""
from test_framework.test_framework import UnitETestFramework
from test_framework.util import *
from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, UNIT
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many transactions without needing to spend
# time signing.
redeem_script_1 = CScript([OP_1, OP_DROP])
redeem_script_2 = CScript([OP_2, OP_DROP])
P2SH_1 = CScript([OP_HASH160, hash160(redeem_script_1), OP_EQUAL])
P2SH_2 = CScript([OP_HASH160, hash160(redeem_script_2), OP_EQUAL])
# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
SCRIPT_SIG = [CScript([OP_TRUE, redeem_script_1]), CScript([OP_TRUE, redeem_script_2])]
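# e.g. an output paying to P2SH_1 is later spent by setting its input's
# scriptSig to SCRIPT_SIG[0], which pushes OP_TRUE plus the serialized
# redeem_script_1 -- no signature is required.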
# Module-level log handle; assigned in EstimateFeeTest.run_test so that
# check_estimates() can log.
log = None
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
"""
Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee)
"""
# It's best to exponentially distribute our random fees
# because the buckets are exponentially spaced.
# Exponentially distributed from 1-128 * fee_increment
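    # (1.1892 ~= 2**0.25, so 1.1892**28 == 2**7 == 128)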
rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
# Total fee ranges from min_fee to min_fee + 127*fee_increment
fee = min_fee - fee_increment + satoshi_round(rand_fee)
tx = CTransaction()
total_in = Decimal("0.00000000")
while total_in <= (amount + fee) and len(conflist) > 0:
t = conflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
while total_in <= (amount + fee) and len(unconflist) > 0:
t = unconflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in))
tx.vout.append(CTxOut(int((total_in - amount - fee)*UNIT), P2SH_1))
tx.vout.append(CTxOut(int(amount*UNIT), P2SH_2))
# These transactions don't need to be signed, but we still have to insert
# the ScriptSig that will satisfy the ScriptPubKey.
for inp in tx.vin:
inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
txid = from_node.sendrawtransaction(ToHex(tx), True)
unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee})
unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount})
return (ToHex(tx), fee)
def split_inputs(from_node, txins, txouts, initial_split = False):
"""
We need to generate a lot of inputs so we can generate a ton of transactions.
This function takes an input from txins, and creates and sends a transaction
which splits the value into 2 outputs which are appended to txouts.
    Previously the split outputs were kept small so that they would not
    accumulate a high coin age, back when the notion of priority still existed.
"""
prevtxout = txins.pop()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
half_change = satoshi_round(prevtxout["amount"]/2)
rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
tx.vout.append(CTxOut(int(half_change*UNIT), P2SH_1))
tx.vout.append(CTxOut(int(rem_change*UNIT), P2SH_2))
# If this is the initial split we actually need to sign the transaction
# Otherwise we just need to insert the proper ScriptSig
    if initial_split:
        completetx = from_node.signrawtransaction(ToHex(tx))["hex"]
    else:
        tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
        completetx = ToHex(tx)
txid = from_node.sendrawtransaction(completetx, True)
txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change})
txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change})
def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
"""
This function calls estimatefee and verifies that the estimates
meet certain invariants.
"""
all_estimates = [ node.estimatefee(i) for i in range(1,26) ]
if print_estimates:
log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
delta = 1.0e-6 # account for rounding error
last_e = max(fees_seen)
for e in [x for x in all_estimates if x >= 0]:
# Estimates should be within the bounds of what transactions fees actually were:
if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
%(float(e), min(fees_seen), max(fees_seen)))
# Estimates should be monotonically decreasing
if float(e)-delta > last_e:
raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
%(float(e),float(last_e)))
last_e = e
valid_estimate = False
invalid_estimates = 0
for i,e in enumerate(all_estimates): # estimate is for i+1
if e >= 0:
valid_estimate = True
if i >= 13: # for n>=14 estimatesmartfee(n/2) should be at least as high as estimatefee(n)
assert node.estimatesmartfee((i+1)//2)["feerate"] > float(e) - delta
else:
invalid_estimates += 1
# estimatesmartfee should still be valid
approx_estimate = node.estimatesmartfee(i+1)["feerate"]
answer_found = node.estimatesmartfee(i+1)["blocks"]
assert approx_estimate > 0
assert answer_found > i+1
# Once we're at a high enough confirmation count that we can give an estimate
# We should have estimates for all higher confirmation counts
if valid_estimate:
raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
# Check on the expected number of different confirmation counts
# that we might not have valid estimates for
if invalid_estimates > max_invalid:
raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
return all_estimates
class EstimateFeeTest(UnitETestFramework):
def set_test_params(self):
self.num_nodes = 3
def setup_network(self):
"""
We'll setup the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of outputs
which we will use to generate our transactions.
"""
self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"],
["-maxorphantx=1000", "-deprecatedrpc=estimatefee"],
["-maxorphantx=1000"]])
# Use node0 to mine blocks for input splitting
        # Node1 mines small blocks, but ones still large enough to keep up
        # with the expected transaction rate.
        # NOTE: the CreateNewBlock code starts counting block size at 1,000
        # bytes (17k is room enough for 110 or so transactions).
        # Node2 is a stingy miner that produces blocks which are too small
        # (room for only 55 or so transactions)
def transact_and_mine(self, numblocks, mining_node):
min_fee = Decimal("0.00001")
# We will now mine numblocks blocks generating on average 100 transactions between each block
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
for i in range(numblocks):
random.shuffle(self.confutxo)
for j in range(random.randrange(100-50,100+50)):
from_index = random.randint(1,2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex) // 2) / 1000.0
self.fees_per_kb.append(float(fee)/tx_kbytes)
sync_mempools(self.nodes[0:3], wait=.1)
mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
sync_blocks(self.nodes[0:3], wait=.1)
# update which txouts are confirmed
newmem = []
for utx in self.memutxo:
if utx["txid"] in mined:
self.confutxo.append(utx)
else:
newmem.append(utx)
self.memutxo = newmem
def run_test(self):
self.log.info("This test is time consuming, please be patient")
self.log.info("Splitting inputs so we can generate tx's")
# Make log handler available to helper functions
global log
log = self.log
# Start node0
self.start_node(0)
self.txouts = []
self.txouts2 = []
# Split a coinbase into two transaction puzzle outputs
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
# Mine
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
# Repeatedly split those 2 outputs, doubling twice for each rep
# Use txouts to monitor the available utxo, since these won't be tracked in wallet
reps = 0
while (reps < 5):
#Double txouts to txouts2
while (len(self.txouts)>0):
split_inputs(self.nodes[0], self.txouts, self.txouts2)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
#Double txouts2 to txouts
while (len(self.txouts2)>0):
split_inputs(self.nodes[0], self.txouts2, self.txouts)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
reps += 1
self.log.info("Finished splitting")
# Now we can connect the other nodes, didn't want to connect them earlier
# so the estimates would not be affected by the splitting transactions
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[2], 1)
self.sync_all()
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
for i in range(2):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(10, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb, 14)
self.log.info("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 10 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(10, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb, 2)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
sync_blocks(self.nodes[0:3], wait=.1)
self.log.info("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb, 2)
if __name__ == '__main__':
EstimateFeeTest().main()
| python | 12,407 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CLI for Invenio App ILS."""
import json
import os
import random
import re
from datetime import datetime, timedelta
from random import randint
import arrow
import click
import lorem
from flask import current_app
from flask.cli import with_appcontext
from invenio_accounts.models import User
from invenio_circulation.api import Loan
from invenio_circulation.pidstore.pids import CIRCULATION_LOAN_PID_TYPE
from invenio_db import db
from invenio_indexer.api import RecordIndexer
from invenio_pages import Page
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_pidstore.providers.recordid_v2 import RecordIdProviderV2
from invenio_search import current_search
from invenio_userprofiles.models import UserProfile
from lorem.text import TextLorem
from .acquisition.api import ORDER_PID_TYPE, VENDOR_PID_TYPE, Order, Vendor
from .document_requests.api import DOCUMENT_REQUEST_PID_TYPE, DocumentRequest
from .documents.api import DOCUMENT_PID_TYPE, Document
from .eitems.api import EITEM_PID_TYPE, EItem
from .ill.api import BORROWING_REQUEST_PID_TYPE, LIBRARY_PID_TYPE, \
BorrowingRequest, Library
from .internal_locations.api import INTERNAL_LOCATION_PID_TYPE, \
InternalLocation
from .items.api import ITEM_PID_TYPE, Item
from .locations.api import LOCATION_PID_TYPE, Location
from .patrons.indexer import PatronIndexer
from .proxies import current_app_ils
from .records_relations.api import RecordRelationsParentChild, \
RecordRelationsSiblings
from .relations.api import Relation
from .series.api import SERIES_PID_TYPE, Series
def minter(pid_type, pid_field, record):
"""Mint the given PID for the given record."""
pid = PersistentIdentifier.get(
pid_type="recid",
pid_value=record[pid_field]
)
pid.status = PIDStatus.REGISTERED
pid.object_type = "rec"
pid.object_uuid = record.id
pid.pid_type = pid_type
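# Illustrative sketch of the minting flow (this is what Generator._persist
# below does for each record type):
#
#     record = Document.create(data)            # data must contain a "pid"
#     minter(DOCUMENT_PID_TYPE, "pid", record)
#     record.commit()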
class Holder(object):
"""Hold generated data."""
def __init__(
self,
patrons_pids,
languages,
librarian_pid,
tags,
total_intloc,
total_items,
total_eitems,
total_documents,
total_loans,
total_series,
total_document_requests,
total_vendors,
total_orders,
total_borrowing_requests,
total_libraries
):
"""Constructor."""
self.patrons_pids = patrons_pids
self.languages = languages
self.librarian_pid = librarian_pid
self.tags = tags
self.internal_locations = {"objs": [], "total": total_intloc}
self.items = {"objs": [], "total": total_items}
self.eitems = {"objs": [], "total": total_eitems}
self.documents = {"objs": [], "total": total_documents}
self.loans = {"objs": [], "total": total_loans}
self.series = {"objs": [], "total": total_series}
self.related_records = {"objs": [], "total": 0}
self.document_requests = {"objs": [], "total": total_document_requests}
self.vendors = {"objs": [], "total": total_vendors}
self.orders = {"objs": [], "total": total_orders}
self.borrowing_requests = {"objs": [], "total": total_borrowing_requests}
self.libraries = {"objs": [], "total": total_libraries}
def pids(self, collection, pid_field):
"""Get a list of PIDs for a collection."""
return [obj[pid_field] for obj in getattr(self, collection)["objs"]]
class Generator(object):
"""Generator."""
def __init__(self, holder, minter):
"""Constructor."""
self.holder = holder
self.minter = minter
def create_pid(self):
"""Create a new persistent identifier."""
return RecordIdProviderV2.create().pid.pid_value
def _persist(self, pid_type, pid_field, record):
"""Mint PID and store in the db."""
minter(pid_type, pid_field, record)
record.commit()
return record
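# All generators below follow the same two-step lifecycle, driven by the
# `data` CLI command further down (sketch):
#
#     gen = ItemGenerator(holder, minter)
#     gen.generate()             # fill holder.items["objs"] with plain dicts
#     records = gen.persist()    # create the records, mint PIDs, commit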
class LocationGenerator(Generator):
"""Location Generator."""
def generate(self):
"""Generate."""
self.holder.location = {
"pid": self.create_pid(),
"name": "Central Library",
"address": "Rue de Meyrin",
"email": "[email protected]",
}
def persist(self):
"""Persist."""
record = Location.create(self.holder.location)
rec = self._persist(LOCATION_PID_TYPE, "pid", record)
db.session.commit()
return rec
class InternalLocationGenerator(Generator):
"""InternalLocation Generator."""
def generate(self):
"""Generate."""
size = self.holder.internal_locations["total"]
location_pid_value, _ = current_app_ils.get_default_location_pid
objs = [
{
"pid": self.create_pid(),
"legacy_id": "{}".format(randint(100000, 999999)),
"name": "Building {}".format(randint(1, 10)),
"notes": lorem.sentence(),
"physical_location": lorem.sentence(),
"location_pid": location_pid_value,
}
for pid in range(1, size + 1)
]
self.holder.internal_locations["objs"] = objs
def persist(self):
"""Persist."""
recs = []
for obj in self.holder.internal_locations["objs"]:
rec = self._persist(
INTERNAL_LOCATION_PID_TYPE, "pid", InternalLocation.create(obj)
)
recs.append(rec)
db.session.commit()
return recs
class ItemGenerator(Generator):
"""Item Generator."""
def generate(self):
"""Generate."""
size = self.holder.items["total"]
iloc_pids = self.holder.pids("internal_locations", "pid")
doc_pids = self.holder.pids("documents", "pid")
shelf_lorem = TextLorem(wsep='-', srange=(2, 3),
words='Ax Bs Cw 8080'.split())
objs = [
{
"pid": self.create_pid(),
"document_pid": random.choice(doc_pids),
"internal_location_pid": random.choice(iloc_pids),
"legacy_id": "{}".format(randint(100000, 999999)),
"legacy_library_id": "{}".format(randint(5, 50)),
"barcode": "{}".format(randint(10000000, 99999999)),
"shelf": "{}".format(shelf_lorem.sentence()),
"description": "{}".format(lorem.text()),
"internal_notes": "{}".format(lorem.text()),
"medium": random.choice(Item.MEDIUMS),
"status": random.choice(
random.choices(population=Item.STATUSES,
weights=[0.7, 0.1, 0.1, 0.1, 0.05],
k=10
)),
"circulation_restriction": random.choice(
Item.CIRCULATION_RESTRICTIONS
),
}
for pid in range(1, size + 1)
]
demo_data_dir = os.path.join(
os.path.realpath("."), "invenio_app_ils", "demo_data"
)
with open(os.path.join(demo_data_dir, "items.json")) as f:
demo_data = json.loads(f.read())
for item in demo_data:
item["pid"] = self.create_pid()
item["document_pid"] = random.choice(doc_pids)
item["internal_location_pid"] = random.choice(iloc_pids)
objs.extend(demo_data)
self.holder.items["objs"] = objs
def persist(self):
"""Persist."""
recs = []
for obj in self.holder.items["objs"]:
rec = self._persist(ITEM_PID_TYPE, "pid", Item.create(obj))
recs.append(rec)
db.session.commit()
return recs
class EItemGenerator(Generator):
"""EItem Generator."""
def generate(self):
"""Generate."""
size = self.holder.eitems["total"]
doc_pids = self.holder.pids("documents", "pid")
objs = [
{
"pid": self.create_pid(),
"document_pid": random.choice(doc_pids),
"description": "{}".format(lorem.text()),
"internal_notes": "{}".format(lorem.text()),
"urls": [
{
"value": "https://home.cern/science/physics/dark-matter",
"description": "Dark matter"
},
{
"value": "https://home.cern/science/physics/antimatter",
"description": "Anti matter"
},
],
"open_access": bool(random.getrandbits(1)),
}
for pid in range(1, size + 1)
]
self.holder.eitems["objs"] = objs
def persist(self):
"""Persist."""
recs = []
for obj in self.holder.eitems["objs"]:
rec = self._persist(EITEM_PID_TYPE, "pid", EItem.create(obj))
recs.append(rec)
db.session.commit()
return recs
class DocumentGenerator(Generator):
"""Document Generator."""
PERIODICAL_ISSUE = "PERIODICAL_ISSUE"
AUTHORS = [
{"full_name": "Close, Frank"},
{"full_name": "CERN", "type": "ORGANISATION"},
{
"full_name": "Doe, Jane",
"affiliations": [{
"name": "Imperial Coll., London",
"identifiers": [{"scheme": "ROR", "value": "12345"}]
}],
"identifiers": [{"scheme": "ORCID", "value": "1234AAA"}],
"roles": ["editor"]
},
{
"full_name": "Doe, John", "roles": ["AUTHOR"],
"affiliations": [{"name": "CERN"}]
},
]
CONFERENCE_INFO = {
"acronym": "CHEP",
"country": "AU",
"dates": "1 - 20 Nov. 2019",
"identifiers": [{"scheme": "OTHER", "value": "CHEP2019"}],
"place": "Adelaide",
"series": "CHEP",
"title": "Conference on Computing in High Energy Physics",
"year": 2019,
}
IMPRINTS = [
{"date": "2019-08-02", "place": "Geneva", "publisher": "CERN"},
{"date": "2017-08-02", "place": "Hamburg", "publisher": "Springer"},
]
ISBNS = [
"0002154129", "978-1-891830-85-3", "978-1-60309-265-4",
"978-1-60309-077-3", "978-1-60309-069-8", "978-1-60309-042-1",
"978-1-891830-37-2", "978-1-60309-029-2", "978-1-891830-40-2",
"978-1-60309-442-9", "978-1-891830-56-3", "978-1-60309-432-0",
"978-1-891830-19-8", "978-1-60309-422-1", "978-1-60309-100-8",
"978-1-891830-81-5", "978-1-60309-271-5", "978-1-891830-92-1",
"978-1-60309-057-5", "978-1-60309-085-8", "978-1-60309-387-3",
"978-1-60309-036-0", "978-1-60309-053-7", "978-1-891830-97-6",
"978-0-9585783-4-9", "978-1-60309-397-2", "978-1-60309-386-6",
"978-1-60309-098-8", "978-1-60309-008-7", "978-1-60309-441-2",
"978-1-891830-55-6", "978-1-891830-86-0", "978-1-891830-91-4",
"978-1-60309-041-4", "978-1-60309-059-9", "978-1-891830-65-5",
"978-1-891830-90-7", "978-1-60309-006-3", "978-1-60309-007-0",
"978-1-60309-437-5", "978-1-891830-51-8", "978-1-60309-070-4",
"978-1-63140-984-4", "978-1-60309-393-4", "978-1-60309-152-7",
"978-1-891830-33-4", "978-1-60309-300-2", "978-1-60309-383-5",
"978-1-60309-400-9", "978-1-891830-36-5", "978-1-60309-075-9",
"978-1-891830-68-6", "978-1-60309-049-0", "978-1-60309-409-2",
"978-1-60309-068-1", "978-1-891830-29-7", "978-1-60309-367-5",
"978-1-60309-413-9", "978-1-60309-089-6", "978-1-60309-445-0",
"978-1-891830-14-3", "978-1-891830-50-1", "978-1-60309-020-9",
"978-1-60309-031-5", "978-1-60309-055-1", "978-1-891830-96-9",
"978-1-60309-043-8", "978-1-891830-87-7", "978-1-60309-033-9",
"978-1-60309-005-6", "978-1-60309-450-4", "978-1-891830-31-0",
"978-1-891830-70-9", "978-1-891830-98-3", "978-1-60309-392-7",
"978-1-60309-074-2", "978-1-891830-41-9", "978-1-60309-088-9",
"978-1-60309-440-5"
]
def generate_document(self, index, **kwargs):
"""Generate document data."""
publication_year = kwargs.get("publication_year", str(randint(1700, 2020)))
imprint = random.choice(self.IMPRINTS)
obj = {
"pid": self.create_pid(),
"title": lorem.sentence(),
"cover_metadata": {"ISBN": random.choice(self.ISBNS), "urls": {}},
"authors": random.sample(self.AUTHORS, randint(1, 3)),
"abstract": "{}".format(lorem.text()),
"document_type": random.choice(Document.DOCUMENT_TYPES),
"created_by": {"type": "script", "value": "demo"},
"languages": [
lang["key"]
for lang in random.sample(self.holder.languages, randint(1, 3))
],
"table_of_content": ["{}".format(lorem.sentence())],
"note": "{}".format(lorem.text()),
"tags": [tag["key"] for tag in random.sample(
self.holder.tags,
randint(1, len(self.holder.tags) - 1))
],
"edition": str(index),
"keywords": {
"source": lorem.sentence(),
"value": lorem.sentence()
},
"conference_info": self.CONFERENCE_INFO,
"number_of_pages": str(random.randint(0, 300)),
"imprint": {
**imprint,
"date": "{}-08-02".format(publication_year)
},
"publication_year": publication_year,
"urls": [
{
"description": "{}".format(lorem.sentence()),
"value": "http://random.url"
}
],
"restricted": False,
}
obj.update(**kwargs)
return obj
def generate(self):
"""Generate."""
size = self.holder.documents["total"]
objs = [
self.generate_document(index)
for index in range(1, size + 1)
]
# Generate periodical issues
volume = 1
issue = 1
publication_year = randint(1700, 2000)
for index in range(1, 11):
objs.append(self.generate_document(
index,
document_type=self.PERIODICAL_ISSUE,
title="Volume {} Issue {}".format(volume, issue),
publication_year=str(publication_year),
))
if issue == 3:
issue = 1
volume += 1
publication_year += 1
else:
issue += 1
demo_data_dir = os.path.join(
os.path.realpath("."), "invenio_app_ils", "demo_data"
)
with open(os.path.join(demo_data_dir, "documents.json")) as f:
demo_data = json.loads(f.read())
for doc in demo_data:
doc["pid"] = self.create_pid()
objs.extend(demo_data)
self.holder.documents["objs"] = objs
def persist(self):
"""Persist."""
recs = []
for obj in self.holder.documents["objs"]:
rec = self._persist(DOCUMENT_PID_TYPE, "pid", Document.create(obj))
recs.append(rec)
db.session.commit()
return recs
class LoanGenerator(Generator):
"""Loan Generator."""
LOAN_STATES = ["PENDING", "ITEM_ON_LOAN", "ITEM_RETURNED", "CANCELLED"]
def _get_item_can_circulate(self, items):
"""Return an item that can circulate."""
        item = random.choice(items)
if item["status"] != "CAN_CIRCULATE":
return self._get_item_can_circulate(items)
return item
def _get_valid_status(self, item, items_on_loan):
"""Return valid loan status for the item to avoid inconsistencies."""
# cannot have 2 loans in the same item
if item["pid"] in items_on_loan:
status = self.LOAN_STATES[0]
else:
status = self.LOAN_STATES[randint(0, 3)]
return status
def _fill_loan_with_valid_request(self, loan):
"""Add fields to the loan with dates valid for a request."""
transaction_date = arrow.utcnow() - timedelta(days=randint(1, 10))
request_start_date = transaction_date + timedelta(days=15)
request_expire_date = transaction_date + timedelta(days=180)
loan["transaction_date"] = transaction_date.isoformat()
loan["request_start_date"] = request_start_date.date().isoformat()
loan["request_expire_date"] = request_expire_date.date().isoformat()
def _fill_loan_with_valid_loan(self, loan):
"""Add fields to the loan with dates valid for a on-going loan."""
transaction_date = arrow.utcnow() - timedelta(days=randint(10, 30))
start_date = transaction_date - timedelta(days=randint(1, 5))
end_date = start_date + timedelta(days=30)
loan["transaction_date"] = transaction_date.isoformat()
loan["start_date"] = start_date.date().isoformat()
loan["end_date"] = end_date.date().isoformat()
loan["extension_count"] = randint(0, 3)
def _fill_loan_with_loan_returned(self, loan):
"""Add fields to the loan with dates valid for a returned loan."""
transaction_date = arrow.utcnow() - timedelta(days=randint(50, 70))
start_date = transaction_date - timedelta(days=randint(40, 50))
end_date = start_date + timedelta(days=30)
loan["transaction_date"] = transaction_date.isoformat()
loan["start_date"] = start_date.date().isoformat()
loan["end_date"] = end_date.date().isoformat()
def _fill_loan_with_loan_cancelled(self, loan):
"""Add fields to the loan with dates valid for a cancelled loan."""
transaction_date = arrow.utcnow() - timedelta(days=randint(50, 100))
request_expire_date = transaction_date + timedelta(days=180)
start_date = transaction_date - timedelta(days=randint(40, 50))
end_date = start_date + timedelta(days=30)
loan["transaction_date"] = transaction_date.isoformat()
loan["request_expire_date"] = request_expire_date.date().isoformat()
loan["start_date"] = start_date.date().isoformat()
loan["end_date"] = end_date.date().isoformat()
loan["cancel_reason"] = "{}".format(lorem.sentence())
def _fill_loan(self, loan):
"""Fill loan with valid dates."""
if loan["state"] == "PENDING":
self._fill_loan_with_valid_request(loan)
elif loan["state"] == "ITEM_ON_LOAN":
self._fill_loan_with_valid_loan(loan)
elif loan["state"] == "ITEM_RETURNED":
self._fill_loan_with_loan_returned(loan)
elif loan["state"] == "CANCELLED":
self._fill_loan_with_loan_cancelled(loan)
return loan
def generate(self):
"""Generate."""
size = self.holder.loans["total"]
loc_pid, _ = current_app_ils.get_default_location_pid
items = self.holder.items["objs"]
patrons_pids = self.holder.patrons_pids
librarian_pid = self.holder.librarian_pid
doc_pids = self.holder.pids("documents", "pid")
all_delivery_methods = list(
current_app.config["ILS_CIRCULATION_DELIVERY_METHODS"].keys()
)
delivery = all_delivery_methods[randint(0, 1)]
items_on_loan = []
for pid in range(1, size + 1):
item = self._get_item_can_circulate(items)
item_state = self._get_valid_status(item, items_on_loan)
patron_id = random.choice(patrons_pids)
loan = {
"pid": self.create_pid(),
"document_pid": random.choice(doc_pids),
"patron_pid": "{}".format(patron_id),
"pickup_location_pid": "{}".format(loc_pid),
"state": "{}".format(item_state),
"transaction_location_pid": "{}".format(loc_pid),
"transaction_user_pid": "{}".format(librarian_pid),
"delivery": {"method": delivery},
}
loan = self._fill_loan(loan)
if item_state != "PENDING":
loan["item_pid"] = {
"type": ITEM_PID_TYPE,
"value": item["pid"]
}
items_on_loan.append(item["pid"])
self.holder.loans["objs"].append(loan)
def persist(self):
"""Persist."""
recs = []
for obj in self.holder.loans["objs"]:
rec = self._persist(
CIRCULATION_LOAN_PID_TYPE, "pid", Loan.create(obj)
)
recs.append(rec)
db.session.commit()
return recs
class SeriesGenerator(Generator):
"""Series Generator."""
MODE_OF_ISSUANCE = ["MULTIPART_MONOGRAPH", "SERIAL"]
def random_issn(self):
"""Generate a random ISSN."""
random_4digit = [randint(1000, 9999), randint(1000, 9999)]
return "-".join(str(r) for r in random_4digit)
def random_multipart(self, obj, index):
"""Randomize multipart data."""
obj["edition"] = str(index)
for _ in range(randint(1, 2)):
obj["identifiers"].append(dict(
scheme="ISBN",
value=random.choice(DocumentGenerator.ISBNS)
))
def random_serial(self, obj):
"""Randomize serial data."""
for _ in range(randint(1, 3)):
obj["identifiers"].append(dict(
material=random.choice(["ONLINE", "PRINT"]),
scheme="ISSN",
value=self.random_issn()
))
obj["abbreviated_title"] = obj["title"].split()[0]
obj["alternative_titles"] = [
dict(
value=obj["title"],
type="SUBTITLE"
),
dict(
value=obj["title"],
type="TRANSLATED_TITLE",
language="FR",
source="CERN"
)
]
obj["internal_notes"] = [
dict(
field="title",
user="Test",
value="Internal test note."
)
]
obj["notes"] = lorem.text()
obj["publisher"] = lorem.sentence().split()[0]
obj["access_urls"] = [
dict(
open_access=True,
description=lorem.sentence(),
value="https://home.cern/"
)
for _ in range(1, 3)
]
obj["urls"] = [
dict(
description=lorem.sentence(),
value="https://home.cern/"
)
for _ in range(1, 3)
]
def generate_minimal(self, objs):
"""Generate a series with only the required fields."""
objs.append({
"pid": self.create_pid(),
"mode_of_issuance": "SERIAL",
"title": "Minimal Series",
})
def generate(self):
"""Generate."""
size = self.holder.series["total"]
objs = []
self.generate_minimal(objs)
for index in range(1, size + 1):
moi = random.choice(self.MODE_OF_ISSUANCE)
authors = random.sample(DocumentGenerator.AUTHORS, len(DocumentGenerator.AUTHORS))
obj = {
"pid": self.create_pid(),
"cover_metadata": {
"ISBN": random.choice(DocumentGenerator.ISBNS),
"urls": {},
},
"mode_of_issuance": moi,
"title": lorem.sentence(),
"authors": [author["full_name"] for author in authors],
"abstract": lorem.text(),
"languages": [
lang["key"]
for lang in random.sample(
self.holder.languages, randint(1, 3)
)
],
"identifiers": [],
"created_by": {"type": "script", "value": "demo"},
}
if moi == "SERIAL":
self.random_serial(obj)
elif moi == "MULTIPART_MONOGRAPH":
self.random_multipart(obj, index)
objs.append(obj)
self.holder.series["objs"] = objs
def persist(self):
"""Persist."""
recs = []
for obj in self.holder.series["objs"]:
rec = self._persist(SERIES_PID_TYPE, "pid", Series.create(obj))
recs.append(rec)
db.session.commit()
return recs
class RecordRelationsGenerator(Generator):
"""Related records generator."""
@staticmethod
def random_series(series, moi):
"""Get a random series with a specific mode of issuance."""
for s in random.sample(series, len(series)):
if s["mode_of_issuance"] == moi:
return s
def generate_parent_child_relations(self, documents, series):
"""Generate parent-child relations."""
def random_docs():
docs = [
doc
for doc in documents
if doc["document_type"] != "PERIODICAL_ISSUE"
]
return random.sample(docs, randint(1, min(5, len(docs))))
objs = self.holder.related_records["objs"]
serial_parent = self.random_series(series, "SERIAL")
multipart_parent = self.random_series(series, "MULTIPART_MONOGRAPH")
multipart_children = random_docs()
serial_children = []
for document in documents:
if document["document_type"] == "PERIODICAL_ISSUE":
serial_children.append(document)
objs.append(serial_parent)
rr = RecordRelationsParentChild()
serial_relation = Relation.get_relation_by_name("serial")
multipart_relation = Relation.get_relation_by_name(
"multipart_monograph"
)
re_volume = re.compile(r'Volume (?P<volume>\d+)', re.IGNORECASE)
for index, child in enumerate(serial_children):
m = re_volume.match(child["title"])
volume = str(index + 1)
if m:
volume = m["volume"]
rr.add(
serial_parent,
child,
relation_type=serial_relation,
volume=volume,
)
objs.append(child)
for index, child in enumerate(multipart_children):
rr.add(
multipart_parent,
child,
relation_type=multipart_relation,
volume="{}".format(index + 1),
)
objs.append(child)
def generate_sibling_relations(self, documents, series):
"""Generate sibling relations."""
objs = self.holder.related_records["objs"]
rr = RecordRelationsSiblings()
def add_random_relations(relation_type):
random_docs = random.sample(
documents, randint(2, min(5, len(documents)))
)
objs.append(random_docs[0])
for record in random_docs[1:]:
rr.add(random_docs[0], record, relation_type=relation_type)
objs.append(record)
if relation_type.name == "edition":
record = self.random_series(series, "MULTIPART_MONOGRAPH")
rr.add(random_docs[0], record, relation_type=relation_type)
objs.append(record)
add_random_relations(Relation.get_relation_by_name("language"))
add_random_relations(Relation.get_relation_by_name("edition"))
def generate(self, rec_docs, rec_series):
"""Generate related records."""
self.generate_parent_child_relations(rec_docs, rec_series)
self.generate_sibling_relations(rec_docs, rec_series)
def persist(self):
"""Persist."""
db.session.commit()
return self.holder.related_records["objs"]
class DocumentRequestGenerator(Generator):
"""Document requests generator."""
def random_document_pid(self):
"""Get a random document PID."""
return random.choice(self.holder.pids("documents", "pid"))
def generate(self):
"""Generate."""
size = self.holder.document_requests["total"]
objs = []
for pid in range(1, size + 1):
state = random.choice(DocumentRequest.STATES)
obj = {
"pid": self.create_pid(),
"state": state,
"patron_pid": random.choice(self.holder.patrons_pids),
"title": lorem.sentence(),
"authors": lorem.sentence(),
"publication_year": randint(1700, 2019),
}
if state == "REJECTED":
obj["reject_reason"] = random.choice(DocumentRequest.REJECT_TYPES)
if obj["reject_reason"] == "IN_CATALOG":
obj["document_pid"] = self.random_document_pid()
elif state == "ACCEPTED":
obj["document_pid"] = self.random_document_pid()
objs.append(obj)
self.holder.document_requests["objs"] = objs
def persist(self):
"""Persist."""
recs = []
for obj in self.holder.document_requests["objs"]:
rec = self._persist(
DOCUMENT_REQUEST_PID_TYPE, "pid", DocumentRequest.create(obj)
)
recs.append(rec)
db.session.commit()
return recs
class LibraryGenerator(Generator):
"""Location Generator."""
def random_name(self):
"""Generate random name."""
parts = lorem.sentence().split()
return " ".join(parts[:min(randint(1, 2), len(parts))])
def generate(self):
"""Generate."""
        size = self.holder.libraries["total"]
objs = []
for pid in range(1, size + 1):
obj = {
"pid": self.create_pid(),
"name": self.random_name(),
"address": "CERN\n1211 Geneva 23\nSwitzerland",
"email": "[email protected]",
"phone": "+41 (0) 22 76 776 76",
"notes": lorem.sentence(),
}
objs.append(obj)
self.holder.libraries["objs"] = objs
def persist(self):
"""Persist."""
recs = []
for obj in self.holder.libraries["objs"]:
rec = self._persist(
LIBRARY_PID_TYPE, "pid", Library.create(obj)
)
recs.append(rec)
db.session.commit()
return recs
class BorrowingRequestGenerator(Generator):
"""Borrowing requests generator."""
def random_date(self, start, end):
"""Generate random date between two dates."""
delta = end - start
int_delta = (delta.days * 24 * 3600) + delta.seconds
return start + timedelta(seconds=random.randrange(int_delta))
def random_document_pid(self):
"""Get a random document PID."""
return random.choice(self.holder.pids("documents", "pid"))
def random_library_pid(self):
"""Get a random library PID if the state is ACCEPTED."""
return random.choice(self.holder.pids("libraries", "pid"))
def random_price(self, currency, min_value=10.0):
"""Generate random price."""
return {
"currency": currency,
"value": round(min_value + random.random() * 100, 2),
}
def generate(self):
"""Generate."""
size = self.holder.borrowing_requests["total"]
objs = []
now = datetime.now()
for pid in range(1, size + 1):
obj = {
"pid": self.create_pid(),
"status": random.choice(BorrowingRequest.STATUSES),
"library_pid": self.random_library_pid(),
"document_pid": self.random_document_pid(),
"patron_pid": random.choice(self.holder.patrons_pids),
"type": "ELECTRONIC",
"notes": lorem.sentence(),
}
t = now + timedelta(days=400)
if obj["status"] != "PENDING":
obj["request_date"] = self.random_date(now, t).date().isoformat()
obj["expected_delivery_date"] = self.random_date(now, t).date().isoformat()
obj["received_date"] = self.random_date(now, t).date().isoformat()
obj["due_date"] = self.random_date(now, t).date().isoformat()
obj["payment"] = {
"debit_cost_main_currency": self.random_price("CHF"),
"debit_cost": self.random_price("EUR"),
"debit_date": self.random_date(now, t).date().isoformat(),
"debit_note": "Charged in euro",
"mode": "CREDIT_CARD",
}
obj["total_main_currency"] = self.random_price("CHF")
obj["total"] = self.random_price("EUR")
if obj["status"] == "CANCELLED":
obj["cancel_reason"] = lorem.sentence()
objs.append(obj)
self.holder.borrowing_requests["objs"] = objs
def persist(self):
"""Persist."""
recs = []
for obj in self.holder.borrowing_requests["objs"]:
rec = self._persist(
BORROWING_REQUEST_PID_TYPE, "pid", BorrowingRequest.create(obj)
)
recs.append(rec)
db.session.commit()
return recs
class VendorGenerator(Generator):
"""Vendor generator."""
def random_name(self):
"""Generate random name."""
parts = lorem.sentence().split()
return " ".join(parts[:min(randint(1, 2), len(parts))])
def generate(self):
"""Generate."""
size = self.holder.vendors["total"]
objs = []
for pid in range(1, size + 1):
obj = {
"pid": self.create_pid(),
"name": self.random_name(),
"address": "CERN\n1211 Geneva 23\nSwitzerland",
"email": "[email protected]",
"phone": "+41 (0) 22 76 776 76",
"notes": lorem.sentence(),
}
objs.append(obj)
self.holder.vendors["objs"] = objs
def persist(self):
"""Persist."""
recs = []
for obj in self.holder.vendors["objs"]:
rec = self._persist(
VENDOR_PID_TYPE, "pid", Vendor.create(obj)
)
recs.append(rec)
db.session.commit()
return recs
class OrderGenerator(Generator):
"""Order generator."""
def random_date(self, start, end):
"""Generate random date between two dates."""
delta = end - start
int_delta = (delta.days * 24 * 3600) + delta.seconds
return start + timedelta(seconds=random.randrange(int_delta))
def random_price(self, currency, min_value=10.0):
"""Generate random price."""
return {
"currency": currency,
"value": round(min_value + random.random() * 100, 2),
}
def random_order_lines(self, status):
"""Generate random order lines."""
doc_pids = self.holder.pids("documents", "pid")
count = randint(1, 6)
doc_pids = random.sample(doc_pids, count)
for i in range(count):
ordered = randint(1, 5)
yield dict(
copies_ordered=ordered,
copies_received=randint(1, ordered) if status == 'RECEIVED' else 0,
document_pid=doc_pids[i],
is_donation=random.choice([True, False]),
is_patron_suggestion=random.choice([True, False]),
medium="PAPER",
notes=lorem.sentence(),
patron_pid=random.choice(self.holder.patrons_pids),
payment_mode="CREDIT_CARD",
purchase_type="PERPETUAL",
recipient="PATRON",
total_price=self.random_price("EUR"),
unit_price=self.random_price("EUR"),
)
def generate(self):
"""Generate."""
size = self.holder.orders["total"]
objs = []
now = datetime.now()
for pid in range(1, size + 1):
order_date = self.random_date(datetime(2010, 1, 1), now)
status = random.choice(Order.STATUSES)
order_lines = list(self.random_order_lines(status))
obj = {
"pid": self.create_pid(),
"created_by_pid": self.holder.librarian_pid,
"vendor_pid": random.choice(self.holder.vendors["objs"])["pid"],
"status": status,
"order_date": order_date.date().isoformat(),
"notes": lorem.sentence(),
"grand_total": self.random_price("EUR", min_value=50.0),
"grand_total_main_currency": self.random_price("CHF", min_value=60.0),
"funds": list(set(lorem.sentence().split())),
"payment": {
"mode": "CREDIT_CARD",
},
"order_lines": order_lines,
}
obj["expected_delivery_date"] = self.random_date(now, now + timedelta(days=400)).date().isoformat()
if obj["status"] == "CANCELLED":
obj["cancel_reason"] = lorem.sentence()
elif obj["status"] == "RECEIVED":
obj["received_date"] = self.random_date(order_date, now).date().isoformat()
objs.append(obj)
self.holder.orders["objs"] = objs
def persist(self):
"""Persist."""
recs = []
for obj in self.holder.orders["objs"]:
rec = self._persist(
ORDER_PID_TYPE, "pid", Order.create(obj)
)
recs.append(rec)
db.session.commit()
return recs
@click.group()
def demo():
"""Demo data CLI."""
@demo.command()
@with_appcontext
def locations():
"""Create demo locations."""
click.echo("Creating locations...")
fake_holder = type("FakeHolder", (object,), {"location": {}})
loc_generator = LocationGenerator(fake_holder, minter)
loc_generator.generate()
rec = loc_generator.persist()
RecordIndexer().index(rec)
@demo.command()
@click.option("--docs", "n_docs", default=20)
@click.option("--items", "n_items", default=50)
@click.option("--eitems", "n_eitems", default=30)
@click.option("--loans", "n_loans", default=100)
@click.option("--internal-locations", "n_intlocs", default=10)
@click.option("--series", "n_series", default=10)
@click.option("--document-requests", "n_document_requests", default=10)
@click.option("--vendors", "n_vendors", default=10)
@click.option("--orders", "n_orders", default=30)
@click.option("--libraries", "n_libraries", default=10)
@click.option("--borrowing-requests", "n_borrowing_requests", default=10)
@with_appcontext
def data(
n_docs,
n_items,
n_eitems,
n_loans,
n_intlocs,
n_series,
n_document_requests,
n_vendors,
n_orders,
n_libraries,
n_borrowing_requests
):
"""Insert demo data."""
click.secho("Generating demo data", fg="yellow")
indexer = RecordIndexer()
vocabulary_dir = os.path.join(
os.path.realpath("."), "invenio_app_ils", "vocabularies", "data")
with open(os.path.join(vocabulary_dir, "tags.json")) as f:
tags = json.loads(f.read())
with open(os.path.join(vocabulary_dir, "languages.json")) as f:
languages = json.loads(f.read())
holder = Holder(
patrons_pids=["1", "2", "5", "6"],
languages=languages,
librarian_pid="4",
tags=tags,
total_intloc=n_intlocs,
total_items=n_items,
total_eitems=n_eitems,
total_documents=n_docs,
total_loans=n_loans,
total_series=n_series,
total_document_requests=n_document_requests,
total_vendors=n_vendors,
total_orders=n_orders,
total_borrowing_requests=n_borrowing_requests,
total_libraries=n_libraries,
)
# InternalLocations
intlocs_generator = InternalLocationGenerator(holder, minter)
intlocs_generator.generate()
rec_intlocs = intlocs_generator.persist()
# Series
click.echo("Creating series...")
series_generator = SeriesGenerator(holder, minter)
series_generator.generate()
rec_series = series_generator.persist()
# Documents
click.echo("Creating documents...")
documents_generator = DocumentGenerator(holder, minter)
documents_generator.generate()
rec_docs = documents_generator.persist()
# Items
click.echo("Creating items...")
items_generator = ItemGenerator(holder, minter)
items_generator.generate()
rec_items = items_generator.persist()
# EItems
click.echo("Creating eitems...")
eitems_generator = EItemGenerator(holder, minter)
eitems_generator.generate()
rec_eitems = eitems_generator.persist()
# Loans
click.echo("Creating loans...")
loans_generator = LoanGenerator(holder, minter)
loans_generator.generate()
rec_loans = loans_generator.persist()
# Related records
click.echo("Creating related records...")
related_generator = RecordRelationsGenerator(holder, minter)
related_generator.generate(rec_docs, rec_series)
related_generator.persist()
# Document requests
click.echo("Creating document requests...")
document_requests_generator = DocumentRequestGenerator(holder, minter)
document_requests_generator.generate()
rec_requests = document_requests_generator.persist()
# Vendors
click.echo("Creating acquisition vendors...")
vendor_generator = VendorGenerator(holder, minter)
vendor_generator.generate()
rec_vendors = vendor_generator.persist()
# Orders
click.echo("Creating acquisition orders...")
order_generator = OrderGenerator(holder, minter)
order_generator.generate()
rec_orders = order_generator.persist()
# Libraries
click.echo("Creating ILL external libraries...")
library_generator = LibraryGenerator(holder, minter)
library_generator.generate()
rec_libraries = library_generator.persist()
# Borrowing requests
click.echo("Creating ILL borrowing requests...")
borrowing_requests_generator = BorrowingRequestGenerator(holder, minter)
borrowing_requests_generator.generate()
rec_borrowing_requests = borrowing_requests_generator.persist()
# index internal locations
indexer.bulk_index([str(r.id) for r in rec_intlocs])
click.echo(
"Sent to the indexing queue {0} locations".format(len(rec_intlocs))
)
# index series
indexer.bulk_index([str(r.id) for r in rec_series])
click.echo("Sent to the indexing queue {0} series".format(len(rec_series)))
# index loans
indexer.bulk_index([str(r.id) for r in rec_loans])
click.echo("Sent to the indexing queue {0} loans".format(len(rec_loans)))
click.secho("Now indexing...", fg="green")
# process queue so items can resolve circulation status correctly
indexer.process_bulk_queue()
# index eitems
indexer.bulk_index([str(r.id) for r in rec_eitems])
click.echo("Sent to the indexing queue {0} eitems".format(len(rec_eitems)))
# index items
indexer.bulk_index([str(r.id) for r in rec_items])
click.echo("Sent to the indexing queue {0} items".format(len(rec_items)))
click.secho("Now indexing...", fg="green")
# process queue so documents can resolve circulation correctly
indexer.process_bulk_queue()
# index libraries
indexer.bulk_index([str(r.id) for r in rec_libraries])
click.echo(
"Sent to the indexing queue {0} libraries".format(
len(rec_libraries)
)
)
# index borrowing requests
indexer.bulk_index([str(r.id) for r in rec_borrowing_requests])
click.echo(
"Sent to the indexing queue {0} borrowing requests".format(
len(rec_borrowing_requests)
)
)
click.secho("Now indexing...", fg="green")
indexer.process_bulk_queue()
# flush all indices after indexing, otherwise ES won't be ready for tests
current_search.flush_and_refresh(index="*")
# index documents
indexer.bulk_index([str(r.id) for r in rec_docs])
click.echo(
"Sent to the indexing queue {0} documents".format(len(rec_docs))
)
# index document requests
indexer.bulk_index([str(r.id) for r in rec_requests])
click.echo(
"Sent to the indexing queue {0} document requests".format(
len(rec_requests)
)
)
# index loans again
indexer.bulk_index([str(r.id) for r in rec_loans])
click.echo("Sent to the indexing queue {0} loans".format(len(rec_loans)))
# index items again
indexer.bulk_index([str(r.id) for r in rec_items])
click.echo("Sent to the indexing queue {0} items".format(len(rec_items)))
# index vendors
indexer.bulk_index([str(r.id) for r in rec_vendors])
click.echo(
"Sent to the indexing queue {0} vendors".format(len(rec_vendors))
)
# index orders
indexer.bulk_index([str(r.id) for r in rec_orders])
click.echo(
"Sent to the indexing queue {0} orders".format(len(rec_orders))
)
click.secho("Now indexing...", fg="green")
indexer.process_bulk_queue()
@click.group()
def patrons():
"""Patrons data CLI."""
@patrons.command()
@with_appcontext
def index():
"""Index patrons."""
patrons = User.query.all()
indexer = PatronIndexer()
click.secho("Now indexing {0} patrons".format(len(patrons)), fg="green")
Patron = current_app_ils.patron_cls
for pat in patrons:
patron = Patron(pat.id)
indexer.index(patron)
def create_userprofile_for(email, username, full_name):
"""Create a fake user profile."""
user = User.query.filter_by(email=email).one_or_none()
if user:
profile = UserProfile(user_id=int(user.get_id()))
profile.username = username
profile.full_name = full_name
db.session.add(profile)
db.session.commit()
@click.group()
def fixtures():
"""Create initial data and demo records."""
@fixtures.command()
@with_appcontext
def pages():
"""Register CDS static pages."""
pages = [
Page(url='/about',
title='About',
description='About',
content='InvenioILS about page',
template_name='invenio_pages/default.html'),
Page(url='/contact',
title='Contact',
description='Contact',
content='You can contact InvenioILS developers on '
'<a href="https://gitter.im/inveniosoftware/invenio">our chatroom</a>',
template_name='invenio_pages/default.html'),
]
with db.session.begin_nested():
Page.query.delete()
db.session.add_all(pages)
db.session.commit()
click.echo('static pages created :)')
@click.command()
@click.option("--recreate-db", is_flag=True, help="Recreating DB.")
@click.option(
"--skip-demo-data", is_flag=True, help="Skip creating demo data."
)
@click.option(
"--skip-file-location",
is_flag=True,
help="Skip creating file location."
)
@click.option("--skip-patrons", is_flag=True, help="Skip creating patrons.")
@click.option(
"--skip-vocabularies",
is_flag=True,
help="Skip creating vocabularies."
)
@click.option("--skip-pages", is_flag=True, help="Skip creating static pages.")
@click.option("--verbose", is_flag=True, help="Verbose output.")
@with_appcontext
def setup(recreate_db, skip_demo_data, skip_file_location, skip_patrons,
skip_vocabularies, skip_pages, verbose):
"""ILS setup command."""
from flask import current_app
from invenio_base.app import create_cli
import redis
click.secho("ils setup started...", fg="blue")
# Clean redis
redis.StrictRedis.from_url(
current_app.config["CACHE_REDIS_URL"]
).flushall()
click.secho("redis cache cleared...", fg="red")
cli = create_cli()
runner = current_app.test_cli_runner()
def run_command(command, catch_exceptions=False):
click.secho("ils {}...".format(command), fg="green")
res = runner.invoke(cli, command, catch_exceptions=catch_exceptions)
if verbose:
click.secho(res.output)
# Remove and create db and indexes
if recreate_db:
run_command("db destroy --yes-i-know", catch_exceptions=True)
run_command("db init")
else:
run_command("db drop --yes-i-know")
run_command("db create")
run_command("index destroy --force --yes-i-know")
run_command("index init --force")
run_command("index queue init purge")
# Create roles to restrict access
run_command("roles create admin")
run_command("roles create librarian")
if not skip_patrons:
# Create users
run_command(
"users create [email protected] -a --password=123456"
) # ID 1
create_userprofile_for("[email protected]", "patron1", "Yannic Vilma")
run_command(
"users create [email protected] -a --password=123456"
) # ID 2
create_userprofile_for("[email protected]", "patron2", "Diana Adi")
run_command("users create [email protected] -a --password=123456") # ID 3
create_userprofile_for("[email protected]", "admin", "Zeki Ryoichi")
run_command(
"users create [email protected] -a --password=123456"
) # ID 4
create_userprofile_for("[email protected]", "librarian", "Hector Nabu")
run_command(
"users create [email protected] -a --password=123456"
) # ID 5
create_userprofile_for("[email protected]", "patron3", "Medrod Tara")
run_command(
"users create [email protected] -a --password=123456"
) # ID 6
create_userprofile_for("[email protected]", "patron4", "Devi Cupid")
# Assign roles
run_command("roles add [email protected] admin")
run_command("roles add [email protected] librarian")
if not skip_vocabularies:
vocabularies_dir = os.path.join(
os.path.realpath("."), "invenio_app_ils", "vocabularies", "data")
json_files = " ".join(
os.path.join(vocabularies_dir, name)
for name in os.listdir(vocabularies_dir)
if name.endswith(".json")
)
run_command("vocabulary index json --force {}".format(json_files))
run_command("vocabulary index opendefinition spdx --force")
run_command("vocabulary index opendefinition opendefinition --force")
# Assign actions
run_command("access allow superuser-access role admin")
run_command("access allow ils-backoffice-access role librarian")
# Create one location as first
run_command("demo locations")
# Index patrons
run_command("patrons index")
# Create files location
if not skip_file_location:
run_command("files location --default ils /tmp/ils-files")
# Generate demo data
if not skip_demo_data:
run_command("demo data")
# Create static pages
if not skip_pages:
run_command("fixtures pages")
click.secho("ils setup finished successfully", fg="blue")
| python | 51,543 |
import unittest
import numpy as np
from keras_self_attention.backend import keras
from keras_self_attention import ScaledDotProductAttention
class TestAttention(unittest.TestCase):
def test_sample(self):
input_layer = keras.layers.Input(
shape=(5,),
name='Input',
)
embed_layer = keras.layers.Embedding(
input_dim=4,
output_dim=5,
mask_zero=True,
weights=[
np.array([
[0.1, 0.2, 0.3, 0.4, 0.5],
[0.2, 0.3, 0.4, 0.6, 0.5],
[0.4, 0.7, 0.2, 0.6, 0.9],
[0.3, 0.5, 0.8, 0.9, 0.1],
]),
],
name='Embedding',
)(input_layer)
att_layer = ScaledDotProductAttention(name='Attention')(embed_layer)
model = keras.models.Model(inputs=input_layer, outputs=att_layer)
model.compile(optimizer='adam', loss='mse')
model.summary()
inputs = np.array([[1, 2, 3, 1, 0]])
predict = model.predict(inputs)[0]
print(predict)
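        # Positions 0 and 3 both hold token 1, and scaled dot-product
        # attention has no positional term, so their outputs must match.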
self.assertTrue(np.allclose(predict[0], predict[3]))
self.assertTrue(np.allclose(
np.asarray([0.27883747, 0.45767492, 0.47448885, 0.69199574, 0.47368336]),
predict[2],
), predict[2])
| python | 1,332 |
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2020 Intel Corporation
""" ETCD Data Write Tool """
import argparse
import logging
import os
import sys
import eis_integ
def parse_arguments(_cli_args):
""" Parse argument passed to function """
parser = argparse.ArgumentParser(description=
"Adds the contents of the json file to the etcd database.")
parser.add_argument("arg", help=
"Name of the json file whose contents should be added to the database.")
return parser.parse_args()
def main(args):
""" Calls the eis_integ.etcd_put_json function to add the contents of the json file
to the etcd database """
eis_integ.init_logger()
os.environ["ETCDCTL_ENDPOINTS"] = "https://" + eis_integ.extract_etcd_endpoint()
eis_integ.check_path_variable("ETCDCTL_CACERT", os.environ.get("ETCDCTL_CACERT"))
eis_integ.check_path_variable("ETCDCTL_CERT", os.environ.get("ETCDCTL_CERT"))
eis_integ.check_path_variable("ETCDCTL_KEY", os.environ.get("ETCDCTL_KEY"))
print("Update the etcd database or add {} file contents to the etcd database".format(args.arg))
eis_integ.etcd_put_json(eis_integ.load_json(args.arg))
return eis_integ.CODES.NO_ERROR
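# Usage sketch (the script file name here is hypothetical; the ETCDCTL_*
# variables must point at valid certificate paths):
#   python3 etcd_write.py app_config.json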
if __name__ == '__main__':
try:
sys.exit(main(parse_arguments(sys.argv[1:])).value)
except eis_integ.EisIntegError as exception:
logging.error("Error while adding entries to ETCD database: %s", exception)
sys.exit(exception.code.value)
| python | 1,557 |
#!/usr/bin/env python3
#=======================================================================
# Solutions to Advent of Code 2019, day 9. Based on day7_problem2.py
#=======================================================================
DEBUG = True
import itertools
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def get_input(filename):
with open(filename,'r') as f:
for line in f:
l = line.strip().split(",")
return [int(x) for x in l]
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def dp(s):
if DEBUG:
print(s)
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def read_operand(addr, mode, state, rb):
    if mode == 2:
        # Relative mode: the parameter is an offset from the relative base.
        ind_addr = state[addr] + rb
        op = state[ind_addr]
        dp("operand = %d. Relative read from address %d" % (op, ind_addr))
    elif mode == 1:
        op = state[addr]
        dp("operand = %d. Immediate read from address %d" % (op, addr))
    else:
        # Position mode: the parameter is an absolute address.
        ind_addr = state[addr]
        op = state[ind_addr]
        dp("operand = %d. Position read from address %d" % (op, ind_addr))
    return op
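# Example (illustrative): instruction 21002 decodes to opcode 02 (MUL)
# with parameter modes 0 (position), 1 (immediate) and 2 (relative)
# for the first, second and third operands respectively.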
#-------------------------------------------------------------------
# cpu
# Execute the program stored in the state. Starting at address 0.
#-------------------------------------------------------------------
def cpu(ctx, indata):
# Opcodes
OP_ADD = 1
OP_MUL = 2
OP_IN = 3
OP_OUT = 4
OP_JNZ = 5
OP_JZ = 6
OP_LT = 7
OP_EQ = 8
OP_RB = 9
OP_HALT = 99
(exe_state, mem_state, ip, rb) = ctx
done = False
if exe_state == "init":
ip = 0
rb = 0
exe_state = "running"
while not done:
# Instruction fetch and decode to get op and operand modes.
instr = mem_state[ip]
op = instr % 100
dp("ip: %d, instr: %d" % (ip, instr))
        # Parameter modes are the decimal digits above the two-digit opcode.
        mode_a = (instr // 100) % 10
        mode_b = (instr // 1000) % 10
        mode_c = (instr // 10000) % 10
# Execute
if op == OP_ADD:
dp("\nOP_ADD")
opa = read_operand(ip + 1, mode_a, mem_state, rb)
opb = read_operand(ip + 2, mode_b, mem_state, rb)
dst = mem_state[ip + 3]
dp("Writing %d to state[%d]" % (opa + opb, dst))
mem_state[dst] = opa + opb
ip += 4
elif op == OP_MUL:
dp("\nOP_MUL")
opa = read_operand(ip + 1, mode_a, mem_state, rb)
opb = read_operand(ip + 2, mode_b, mem_state, rb)
dst = mem_state[ip + 3]
dp("Writing %d to state[%d]" % (opa * opb, dst))
mem_state[dst] = opa * opb
ip += 4
elif op == OP_IN:
dp("\nOP_IN")
if exe_state == "running":
dp("Need to get input.")
return ("in", 0, ("wait_in", mem_state, ip, rb))
else:
dp("Input received, continuing.")
exe_state = "running"
i = indata
dst = mem_state[ip + 1]
mem_state[dst] = i
dp("Got %d. Stored to state[%d]" % (i, dst))
ip += 2
elif op == OP_OUT:
dp("\nOP_OUT")
opa = read_operand(ip + 1, mode_a, mem_state, rb)
if exe_state == "running":
dp("Need to send output.")
return ("out", opa, ("wait_out", mem_state, ip, rb))
else:
dp("Output sent, continuing.")
exe_state = "running"
dp("Output: %d" % (opa))
ip += 2
elif op == OP_JNZ:
dp("\nOP_JNZ")
opa = read_operand(ip + 1, mode_a, mem_state, rb)
opb = read_operand(ip + 2, mode_b, mem_state, rb)
if opa != 0:
dp("opa != 0, jumping to addr %d" % (opb))
ip = opb
else:
dp("opa == 0, moving to next instruction")
ip += 3
elif op == OP_JZ:
dp("\nOP_JZ")
opa = read_operand(ip + 1, mode_a, mem_state, rb)
opb = read_operand(ip + 2, mode_b, mem_state, rb)
if opa == 0:
ip = opb
dp("opa == 0, jumping to addr %d" % (opb))
else:
dp("opa != 0, moving to next instruction")
ip += 3
elif op == OP_LT:
dp("\nOP_LT")
opa = read_operand(ip + 1, mode_a, mem_state, rb)
opb = read_operand(ip + 2, mode_b, mem_state, rb)
            dst = mem_state[ip + 3]
if opa < opb:
mem_state[dst] = 1
dp("opa < opb. Writing 1 to state[%d]" % (dst))
else:
mem_state[dst] = 0
dp("opa >= opb. Writing 0 to state[%d]" % (dst))
ip += 4
elif op == OP_EQ:
dp("\nOP_EQ")
opa = read_operand(ip + 1, mode_a, mem_state, rb)
opb = read_operand(ip + 2, mode_b, mem_state, rb)
dst = mem_state[ip + 3]
if opa == opb:
mem_state[dst] = 1
dp("opa == opb. Writing 1 to state[%d]" % (dst))
else:
mem_state[dst] = 0
dp("opa != opb. Writing 0 to state[%d]" % (dst))
ip += 4
        elif op == OP_RB:
            dp("\nOP_RB")
            opa = read_operand(ip + 1, mode_a, mem_state, rb)
            dp("Adjusting relative base by %d to %d" % (opa, rb + opa))
            rb += opa
            ip += 2
elif op == OP_HALT:
dp("\nOP_HALT")
done = True
return ("done", 0, (exe_state, mem_state, ip, rb))
else:
dp("\nOP_UNKNOWN")
done = True
return ("error", 0, (exe_state, mem_state, ip, rb))
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def run_program(program):
my_program = program[:]
my_program.extend([0] * 1024)
ctx = ("init", my_program, 0, 0)
status = "init"
response = 0
while status != "done":
(status, outdata, ctx) = cpu(ctx, 0)
if status == "out":
response = outdata
dp(outdata)
if status == "error":
return False
return response
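# Usage sketch (not part of the original): with DEBUG set to False, the
# day 9 "quine" program, which outputs a copy of itself, is a handy
# end-to-end check of the interpreter:
#
#   quine = [109, 1, 204, -1, 1001, 100, 1, 100, 1008, 100, 16,
#            101, 1006, 101, 0, 99]
#   run_program(quine)  # final output (the return value) is 99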
#-------------------------------------------------------------------
# problem1
#-------------------------------------------------------------------
def problem1():
print("Problem 1")
my_program = get_input("day13_input.txt")
print(my_program)
print("")
#-------------------------------------------------------------------
# problem2
#-------------------------------------------------------------------
def problem2():
TEST1 = True
print("Problem 2")
print("")
#-------------------------------------------------------------------
#-------------------------------------------------------------------
problem1()
problem2()
#=======================================================================
| python | 7,342 |
#!/usr/bin/env python
import os, sys, db, cgi, cgitb, re, json, operator
class HTML(object):
"""used to generate the HTML code to be outputted"""
def __init__(self, debug, tStr, page, area, **dbInfo):
docRoot = os.environ["DOCUMENT_ROOT"]
self.incPath = os.path.join(docRoot, 'layout')
layout = open(os.path.join(self.incPath, 'all', 'layout.json'))
self.layout = json.load(layout)
layout.close()
tags = open(os.path.join(self.incPath, 'all', 'tags.json'))
self.tags = json.load(tags)
tags.close()
self.area = area
self.page = page
pageNames = os.listdir(os.path.join(self.incPath, area, 'pages'))
pages = {}
for p in pageNames:
            pages[re.split(r'\.', p)[0]] = p
pNameParts = re.split('_', page)
pName = ''
for p in pNameParts:
pName = " ".join([pName, p.capitalize()])
pName = pName.lstrip()
self.layout['head']['title'] = "{0} | {1}".format(tStr, pName)
self.db = db.connect(**dbInfo)
self.debug = debug
self.debugOut = ''
self.debugBody = ''
return
def generateBody(self):
areaPath = os.path.join(self.incPath, self.area)
# load layout paths
layoutPath = os.path.join(areaPath, 'layout.json')
menuPath = os.path.join(areaPath, 'menu.json')
pagePath = os.path.join(areaPath, 'pages', '{0}.json'.format(self.page))
        self.debugOut += '<p>Loading packages: <br>{0}</p>'.format(
            '<br> '.join([layoutPath, menuPath, pagePath]))
# read file for area layout and add to main layout
if os.path.exists(layoutPath):
f = open(layoutPath, 'r')
self.layout['body']['html'] = json.load(f)
f.close()
# read file for menu layout and add to main layout
for x in self.layout['body']['html']:
for y in self.layout['body']['html'][x]:
if self.debug:
self.debugOut += "<br>Checking layout for: {0}<br>".format(y)
if y == "menu":
if os.path.exists(menuPath):
self.debugOut += "Loaded menu layout<br>"
self.layout['body']['html'][x][y]['html'] = json.load(open(menuPath, 'r'))
else:
self.layout['body']['html'][x][y]['html'] = "err404"
elif y == "main":
if os.path.exists(pagePath):
self.debugOut += "Loaded page layout<br>"
self.layout['body']['html'][x][y]['html'] = json.load(open(pagePath, 'r'))
else:
self.layout['body']['html'][x][y]['html'] = "err404"
else:
# retain original contents
pass
self.debugOut += "<br>Body contents: <br>{0}<br>".format(self.layout['body'])
return
def recurse(self, d):
d = sorted(d.items(), key=operator.itemgetter(0))
html = ''
for k, v in d:
try:
if k in self.tags:
attrs = ''
for ak in v['attrs']:
attrs += ' {0}="{1}"'.format(ak, v['attrs'][ak])
if isinstance(v['html'], dict):
html += self.tags[k].format(attrs, self.recurse(v['html']))
else:
html += self.tags[k].format(attrs, v['html'])
else:
if isinstance(v, dict):
html += self.recurse(v)
except KeyError:
if isinstance(v, dict):
html += self.recurse(v)
except TypeError:
html += '<div class="err-msg" style="color: red;"><p>It looks like there was an error with your layout package for this page: <b>{0}</b> is not a valid key value.</p></div>'.format(k)
return html
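    # Illustrative sketch (the tag and layout data below are assumed, not
    # taken from the real JSON packages): with
    #   self.tags = {'div': '<div{0}>{1}</div>'}
    #   d = {'01': {'div': {'attrs': {'id': 'box'}, 'html': 'hi'}}}
    # recurse(d) walks the sorted keys and returns '<div id="box">hi</div>'.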
"""Add JavaScript script to HTML head section"""
def addJS(self, sPath="", sBody=""):
        index = len(self.layout['head']['js'])
self.layout['head']['js'][index] = {'src': sPath, 'body': sBody}
return
"""Add a new link to the HTML head section"""
def addLink(self, lRel, lType, lPath, lMedia):
        index = len(self.layout['head']['links'])
self.layout['head']['links'][index] = {'rel': lRel, 'type': lType, 'href': lPath, 'media': lMedia}
return
def addMeta(self, name, content):
        index = len(self.layout['head']['meta'])
self.layout['head']['meta'][index] = {'name': name, 'content': content}
return
"""Add a new page to the site"""
def addPage(self, pageName):
return
"""Output generated HTML code"""
def render(self):
html = self.tags['html']
head = self.tags['head']
title = self.tags['title'].format(self.layout['head']['title'])
body = self.tags['body']
metaItems = ''
metaTag = self.tags['meta']
for x in self.layout['head']['meta']:
metaItems = '\n'.join([metaItems, metaTag.format(self.layout['head']['meta'][x]['name'], self.layout['head']['meta'][x]['content'])])
linkItems = ''
linkTag = self.tags['link']
for x in self.layout['head']['links']:
linkItems = '\n'.join([linkItems, linkTag.format(self.layout['head']['links'][x]['rel'], self.layout['head']['links'][x]['type'], \
self.layout['head']['links'][x]['href'], self.layout['head']['links'][x]['media'])])
jsItems = ''
jsTag = self.tags['script']
for x in self.layout['head']['js']:
jsItems = '\n'.join([jsItems, jsTag.format(self.layout['head']['js'][x]['src'], self.layout['head']['js'][x]['body'])])
head = head.format(title, metaItems, linkItems, jsItems)
children = self.recurse(self.layout['body']['html'])
children += '<a href="?a={0}&p={1}&debug={2}">Turn Debugging {3}</a>'.format(self.area, self.page, int(not self.debug), "On" if self.debug is False else "Off")
if self.debug:
children = self.debugOut + self.debugBody + children
body = body.format(children)
html = html.format(head, body)
html = self.tags['doctype'].format(html)
return html
| python | 6,799 |
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
t = np.arange(0.0, 5.0, 0.01)
s = np.cos(2 * np.pi * t)
line, = ax.plot(t, s, lw=2)
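# xy is the data point being annotated; xytext places the label text,
# and arrowprops draws an arrow from the label toward xy.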
ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
arrowprops=dict(facecolor='black', shrink=0.05),
)
ax.set_ylim(-2, 2)
plt.show()
| python | 321 |
# -*- coding: utf-8 -*-
'''
Module for managing windows systems.
:depends:
- win32net
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import
# Import python libs
import logging
import time
from datetime import datetime
# Import 3rd Party Libs
try:
import pythoncom
import wmi
import win32net
import win32api
import win32con
import pywintypes
import ctypes
from ctypes import windll
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Import salt libs
import salt.utils
import salt.utils.locales
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
'''
    Set the system module if the kernel is Windows
'''
if HAS_WIN32NET_MODS and salt.utils.is_windows():
return __virtualname__
return (False, "Module win_system: module only works on Windows systems")
def _convert_minutes_seconds(timeout, in_seconds=False):
'''
convert timeout to seconds
'''
return timeout if in_seconds else timeout*60
def _convert_date_time_string(dt_string):
'''
convert string to date time object
'''
dt_string = dt_string.split('.')[0]
dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S')
return dt_obj.strftime('%Y-%m-%d %H:%M:%S')
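# Example (illustrative): a WMI timestamp such as '20200806122500.000000+000'
# is truncated at the '.' and reformatted to '2020-08-06 12:25:00'.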
def halt(timeout=5, in_seconds=False):
'''
Halt a running system.
:param int timeout:
Number of seconds before halting the system.
Default is 5 seconds.
:return: True is successful.
:rtype: bool
timeout
The wait time before the system will be shutdown.
in_seconds
Whether to treat timeout as seconds or minutes.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' system.halt 5
'''
return shutdown(timeout=timeout, in_seconds=in_seconds)
def init(runlevel): # pylint: disable=unused-argument
'''
Change the system runlevel on sysV compatible systems
CLI Example:
.. code-block:: bash
salt '*' system.init 3
'''
# cmd = ['init', runlevel]
# ret = __salt__['cmd.run'](cmd, python_shell=False)
# return ret
# TODO: Create a mapping of runlevels to # pylint: disable=fixme
# corresponding Windows actions
return 'Not implemented on Windows at this time.'
def poweroff(timeout=5, in_seconds=False):
'''
Power off a running system.
:param int timeout:
Number of seconds before powering off the system.
Default is 5 seconds.
:return: True if successful
:rtype: bool
timeout
The wait time before the system will be shutdown.
in_seconds
Whether to treat timeout as seconds or minutes.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' system.poweroff 5
'''
return shutdown(timeout=timeout, in_seconds=in_seconds)
def reboot(timeout=5, in_seconds=False, wait_for_reboot=False, # pylint: disable=redefined-outer-name
only_on_pending_reboot=False):
'''
Reboot a running system.
:param int timeout:
Number of minutes/seconds before rebooting the system. Minutes vs
seconds depends on the value of ``in_seconds``.
Default is 5 minutes.
:param bool in_seconds:
Whether to treat timeout as seconds or minutes.
.. versionadded:: 2015.8.0
:param bool wait_for_reboot:
        Sleeps for timeout + 30 seconds after reboot has been initiated.
        This is useful in a highstate, for example, where many states could
        run after this one and you don't want them to start until after the
        restart, i.e. you could otherwise end up with a half-finished state.
.. versionadded:: 2015.8.0
:param bool only_on_pending_reboot:
        If this is set to True, then the reboot will only proceed
        if the system reports a pending reboot. Setting this parameter to
True could be useful when calling this function from a final housekeeping
state intended to be executed
at the end of a state run (using *order: last*).
:return: True if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.reboot 5
salt '*' system.reboot 5 True
As example of invoking this function from within a final housekeeping state
is as follows:
Example:
.. code-block:: yaml
final housekeeping:
module.run:
- name: system.reboot
- only_on_pending_reboot: True
- order: last
'''
ret = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds,
only_on_pending_reboot=only_on_pending_reboot)
if wait_for_reboot:
seconds = _convert_minutes_seconds(timeout, in_seconds)
time.sleep(seconds + 30)
return ret
def shutdown(message=None, timeout=5, force_close=True, reboot=False, # pylint: disable=redefined-outer-name
in_seconds=False, only_on_pending_reboot=False):
'''
Shutdown a running system.
:param str message:
A message to display to the user before shutting down.
:param int timeout:
The length of time that the shutdown dialog box should be displayed, in
seconds. While this dialog box is displayed, the shutdown can be stopped
by the shutdown_abort function.
If timeout is not zero, InitiateSystemShutdown displays a dialog box on
the specified computer. The dialog box displays the name of the user
who called the function, displays the message specified by the
lpMessage parameter, and prompts the user to log off. The dialog box
beeps when it is created and remains on top of other windows in the
system. The dialog box can be moved but not closed. A timer counts down
the remaining time before a forced shutdown.
If timeout is zero, the computer shuts down without displaying the
dialog box, and the shutdown cannot be stopped by shutdown_abort.
Default is 5 minutes
:param bool in_seconds:
Whether to treat timeout as seconds or minutes.
.. versionadded:: 2015.8.0
:param bool force_close:
True to force close all open applications. False displays a dialog box
instructing the user to close the applications.
:param bool reboot:
True restarts the computer immediately after shutdown.
False caches to disk and safely powers down the system.
:param bool only_on_pending_reboot:
        If this is set to True, then the shutdown will only proceed
if the system reports a pending reboot.
:return: True if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.shutdown 5
'''
timeout = _convert_minutes_seconds(timeout, in_seconds)
if only_on_pending_reboot and not get_pending_reboot():
return True
if message and not isinstance(message, str):
message = message.decode('utf-8')
try:
win32api.InitiateSystemShutdown('127.0.0.1', message, timeout,
force_close, reboot)
return True
except pywintypes.error as exc:
(number, context, message) = exc
log.error('Failed to shutdown the system')
log.error('nbr: {0}'.format(number))
log.error('ctx: {0}'.format(context))
log.error('msg: {0}'.format(message))
return False
def shutdown_hard():
'''
Shutdown a running system with no timeout or warning.
:return: True if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.shutdown_hard
'''
return shutdown(timeout=0)
def shutdown_abort():
'''
Abort a shutdown. Only available while the dialog box is being
displayed to the user. Once the shutdown has initiated, it cannot be aborted
:return: True if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt 'minion-id' system.shutdown_abort
'''
try:
win32api.AbortSystemShutdown('127.0.0.1')
return True
except pywintypes.error as exc:
(number, context, message) = exc
log.error('Failed to abort system shutdown')
log.error('nbr: {0}'.format(number))
log.error('ctx: {0}'.format(context))
log.error('msg: {0}'.format(message))
return False
def lock():
'''
Lock the workstation.
:return: True if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt 'minion-id' system.lock
'''
return windll.user32.LockWorkStation()
def set_computer_name(name):
'''
Set the Windows computer name
:param str name:
The new name to give the computer. Requires a reboot to take effect.
:return:
Returns a dictionary containing the old and new names if successful.
False if not.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_computer_name 'DavesComputer'
'''
if name and not isinstance(name, str):
name = name.decode('utf-8')
if windll.kernel32.SetComputerNameExW(win32con.ComputerNamePhysicalDnsHostname,
name):
ret = {'Computer Name': {'Current': get_computer_name()}}
pending = get_pending_computer_name()
if pending not in (None, False):
ret['Computer Name']['Pending'] = pending
return ret
return False
def get_pending_computer_name():
'''
Get a pending computer name. If the computer name has been changed, and the
change is pending a system reboot, this function will return the pending
computer name. Otherwise, ``None`` will be returned. If there was an error
retrieving the pending computer name, ``False`` will be returned, and an
error message will be logged to the minion log.
:return:
Returns the pending name if pending restart. Returns none if not pending
restart.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_pending_computer_name
'''
current = get_computer_name()
pending = __salt__['reg.read_value'](
'HKLM',
r'SYSTEM\CurrentControlSet\Services\Tcpip\Parameters',
'NV Hostname')['vdata']
if pending:
return pending if pending != current else None
return False
def get_computer_name():
'''
Get the Windows computer name
:return:
Returns the computer name if found. Otherwise returns False
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_computer_name
'''
name = win32api.GetComputerNameEx(win32con.ComputerNamePhysicalDnsHostname)
return name if name else False
def set_computer_desc(desc=None):
'''
Set the Windows computer description
:param str desc:
The computer description
:return: False if it fails. Description if successful.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
'''
# Make sure the system exists
# Return an object containing current information array for the computer
system_info = win32net.NetServerGetInfo(None, 101)
# If desc is passed, decode it for unicode
if desc is None:
return False
if not isinstance(desc, str):
desc = desc.decode('utf-8')
system_info['comment'] = desc
# Apply new settings
try:
win32net.NetServerSetInfo(None, 101, system_info)
except win32net.error as exc:
(number, context, message) = exc
log.error('Failed to update system')
log.error('nbr: {0}'.format(number))
log.error('ctx: {0}'.format(context))
log.error('msg: {0}'.format(message))
return False
return {'Computer Description': get_computer_desc()}
set_computer_description = salt.utils.alias_function(set_computer_desc, 'set_computer_description') # pylint: disable=invalid-name
def get_system_info():
'''
Get system information.
:return:
Returns a Dictionary containing information about the system to include
name, description, version, etc...
:rtype: dict
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_info
'''
os_type = {1: 'Work Station',
2: 'Domain Controller',
3: 'Server'}
pythoncom.CoInitialize()
conn = wmi.WMI()
system = conn.Win32_OperatingSystem()[0]
ret = {'name': get_computer_name(),
'description': system.Description,
'install_date': system.InstallDate,
'last_boot': system.LastBootUpTime,
'os_manufacturer': system.Manufacturer,
'os_name': system.Caption,
'users': system.NumberOfUsers,
'organization': system.Organization,
'os_architecture': system.OSArchitecture,
'primary': system.Primary,
'os_type': os_type[system.ProductType],
'registered_user': system.RegisteredUser,
'system_directory': system.SystemDirectory,
'system_drive': system.SystemDrive,
'os_version': system.Version,
'windows_directory': system.WindowsDirectory}
system = conn.Win32_ComputerSystem()[0]
ret.update({'hardware_manufacturer': system.Manufacturer,
'hardware_model': system.Model,
'processors': system.NumberOfProcessors,
'processors_logical': system.NumberOfLogicalProcessors,
'system_type': system.SystemType})
system = conn.Win32_BIOS()[0]
ret.update({'hardware_serial': system.SerialNumber,
'bios_manufacturer': system.Manufacturer,
'bios_version': system.Version,
'bios_details': system.BIOSVersion,
'bios_caption': system.Caption,
'bios_description': system.Description})
ret['install_date'] = _convert_date_time_string(ret['install_date'])
ret['last_boot'] = _convert_date_time_string(ret['last_boot'])
return ret
def get_computer_desc():
'''
Get the Windows computer description
:return:
Returns the computer description if found. Otherwise returns False
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_computer_desc
'''
desc = get_system_info()['description']
return False if desc is None else desc
get_computer_description = salt.utils.alias_function(get_computer_desc, 'get_computer_description') # pylint: disable=invalid-name
def get_hostname():
'''
.. versionadded:: 2016.3.0
Get the hostname of the windows minion
:return:
Returns the hostname of the windows minion
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_hostname
'''
cmd = 'wmic nicconfig get dnshostname'
ret = __salt__['cmd.run'](cmd=cmd)
_, _, hostname = ret.split("\n")
return hostname
def set_hostname(hostname):
'''
.. versionadded:: 2016.3.0
Set the hostname of the windows minion, requires a restart before this
will be updated.
:param str hostname:
The hostname to set
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_hostname newhostname
'''
curr_hostname = get_hostname()
cmd = "wmic computersystem where name='{0}' call rename name='{1}'".format(curr_hostname, hostname)
ret = __salt__['cmd.run'](cmd=cmd)
return "successful" in ret
def _lookup_error(number):
'''
Lookup the error based on the passed number
.. versionadded:: 2015.5.7
.. versionadded:: 2015.8.2
:param int number: Number code to lookup
:return: The text that corresponds to the error number
:rtype: str
'''
return_values = {
2: 'Invalid OU or specifying OU is not supported',
5: 'Access is denied',
53: 'The network path was not found',
87: 'The parameter is incorrect',
110: 'The system cannot open the specified object',
1323: 'Unable to update the password',
1326: 'Logon failure: unknown username or bad password',
1355: 'The specified domain either does not exist or could not be contacted',
2224: 'The account already exists',
2691: 'The machine is already joined to the domain',
2692: 'The machine is not currently joined to a domain',
}
return return_values[number]
def join_domain(domain,
username=None,
password=None,
account_ou=None,
account_exists=False,
restart=False):
'''
Join a computer to an Active Directory domain. Requires reboot.
:param str domain:
The domain to which the computer should be joined, e.g.
``example.com``
:param str username:
Username of an account which is authorized to join computers to the
specified domain. Need to be either fully qualified like
``[email protected]`` or simply ``user``
:param str password:
Password of the specified user
:param str account_ou:
The DN of the OU below which the account for this computer should be
created when joining the domain, e.g.
``ou=computers,ou=departm_432,dc=my-company,dc=com``
:param bool account_exists:
If set to ``True`` the computer will only join the domain if the account
already exists. If set to ``False`` the computer account will be created
if it does not exist, otherwise it will use the existing account.
Default is False
:param bool restart: Restarts the computer after a successful join
.. versionadded:: 2015.8.2/2015.5.7
:returns: Returns a dictionary if successful. False if unsuccessful.
:rtype: dict, bool
CLI Example:
.. code-block:: bash
salt 'minion-id' system.join_domain domain='domain.tld' \\
username='joinuser' password='joinpassword' \\
account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
account_exists=False, restart=True
'''
status = get_domain_workgroup()
if 'Domain' in status:
if status['Domain'] == domain:
return 'Already joined to {0}'.format(domain)
if username and '\\' not in username and '@' not in username:
username = '{0}@{1}'.format(username, domain)
if username and password is None:
return 'Must specify a password if you pass a username'
# remove any escape characters
if isinstance(account_ou, str):
account_ou = account_ou.split('\\')
account_ou = ''.join(account_ou)
NETSETUP_JOIN_DOMAIN = 0x1 # pylint: disable=invalid-name
NETSETUP_ACCOUNT_CREATE = 0x2 # pylint: disable=invalid-name
NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20 # pylint: disable=invalid-name
NETSETUP_JOIN_WITH_NEW_NAME = 0x400 # pylint: disable=invalid-name
join_options = 0x0
join_options |= NETSETUP_JOIN_DOMAIN
join_options |= NETSETUP_DOMAIN_JOIN_IF_JOINED
join_options |= NETSETUP_JOIN_WITH_NEW_NAME
if not account_exists:
join_options |= NETSETUP_ACCOUNT_CREATE
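    # With all four options set the combined flag value is 0x423
    # (0x1 | 0x2 | 0x20 | 0x400).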
pythoncom.CoInitialize()
conn = wmi.WMI()
comp = conn.Win32_ComputerSystem()[0]
err = comp.JoinDomainOrWorkgroup(Name=domain,
Password=password,
UserName=username,
AccountOU=account_ou,
FJoinOptions=join_options)
# you have to do this because JoinDomainOrWorkgroup returns a strangely
# formatted value that looks like (0,)
if not err[0]:
ret = {'Domain': domain,
'Restart': False}
if restart:
ret['Restart'] = reboot()
return ret
log.error(_lookup_error(err[0]))
return False
def unjoin_domain(username=None,
password=None,
domain=None,
workgroup='WORKGROUP',
disable=False,
restart=False):
r'''
Unjoin a computer from an Active Directory Domain. Requires restart.
:param username:
Username of an account which is authorized to manage computer accounts
on the domain. Need to be fully qualified like ``[email protected]`` or
``domain.tld\user``. If domain not specified, the passed domain will be
used. If computer account doesn't need to be disabled, can be None.
:param str password:
Password of the specified user
:param str domain: The domain from which to unjoin the computer. Can be None
:param str workgroup: The workgroup to join the computer to. Default is
``WORKGROUP``
.. versionadded:: 2015.8.2/2015.5.7
:param bool disable:
Disable the computer account in Active Directory. True to disable.
Default is False
:param bool restart: Restart the computer after successful unjoin
.. versionadded:: 2015.8.2/2015.5.7
:returns: Returns a dictionary if successful. False if unsuccessful.
:rtype: dict, bool
CLI Example:
.. code-block:: bash
salt 'minion-id' system.unjoin_domain restart=True
salt 'minion-id' system.unjoin_domain username='unjoinuser' \\
password='unjoinpassword' disable=True \\
restart=True
'''
status = get_domain_workgroup()
if 'Workgroup' in status:
if status['Workgroup'] == workgroup:
return 'Already joined to {0}'.format(workgroup)
if username and '\\' not in username and '@' not in username:
if domain:
username = '{0}@{1}'.format(username, domain)
else:
return 'Must specify domain if not supplied in username'
if username and password is None:
return 'Must specify a password if you pass a username'
NETSETUP_ACCT_DELETE = 0x2 # pylint: disable=invalid-name
unjoin_options = 0x0
if disable:
unjoin_options |= NETSETUP_ACCT_DELETE
pythoncom.CoInitialize()
conn = wmi.WMI()
comp = conn.Win32_ComputerSystem()[0]
err = comp.UnjoinDomainOrWorkgroup(Password=password,
UserName=username,
FUnjoinOptions=unjoin_options)
# you have to do this because UnjoinDomainOrWorkgroup returns a
# strangely formatted value that looks like (0,)
if not err[0]:
err = comp.JoinDomainOrWorkgroup(Name=workgroup)
if not err[0]:
ret = {'Workgroup': workgroup,
'Restart': False}
if restart:
ret['Restart'] = reboot()
return ret
else:
log.error(_lookup_error(err[0]))
log.error('Failed to join the computer to {0}'.format(workgroup))
return False
else:
log.error(_lookup_error(err[0]))
log.error('Failed to unjoin computer from {0}'.format(status['Domain']))
return False
def get_domain_workgroup():
'''
Get the domain or workgroup the computer belongs to.
.. versionadded:: 2015.5.7
.. versionadded:: 2015.8.2
:return: The name of the domain or workgroup
:rtype: str
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_domain_workgroup
'''
pythoncom.CoInitialize()
conn = wmi.WMI()
for computer in conn.Win32_ComputerSystem():
if computer.PartOfDomain:
return {'Domain': computer.Domain}
else:
return {'Workgroup': computer.Domain}
def _try_parse_datetime(time_str, fmts):
'''
Attempts to parse the input time_str as a date.
:param str time_str: A string representing the time
:param list fmts: A list of date format strings
:return: Returns a datetime object if parsed properly. Otherwise None
:rtype datetime
'''
result = None
for fmt in fmts:
try:
result = datetime.strptime(time_str, fmt)
break
except ValueError:
pass
return result
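# Example (illustrative): _try_parse_datetime('11:37 PM', ['%H:%M', '%I:%M %p'])
# fails the first format, matches the second and returns
# datetime(1900, 1, 1, 23, 37).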
def get_system_time():
'''
Get the system time.
:return: Returns the system time in HH:MM:SS AM/PM format.
:rtype: str
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_system_time
'''
now = win32api.GetLocalTime()
meridian = 'AM'
hours = int(now[4])
if hours == 12:
meridian = 'PM'
elif hours == 0:
hours = 12
elif hours > 12:
hours = hours - 12
meridian = 'PM'
return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hours, now[5], now[6], meridian)
def set_system_time(newtime):
'''
Set the system time.
:param str newtime:
The time to set. Can be any of the following formats.
- HH:MM:SS AM/PM
- HH:MM AM/PM
- HH:MM:SS (24 hour)
- HH:MM (24 hour)
:return: Returns True if successful. Otherwise False.
:rtype: bool
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_system_time 12:01
'''
# Get date/time object from newtime
fmts = ['%I:%M:%S %p', '%I:%M %p', '%H:%M:%S', '%H:%M']
dt_obj = _try_parse_datetime(newtime, fmts)
if dt_obj is None:
return False
# Set time using set_system_date_time()
return set_system_date_time(hours=dt_obj.hour,
minutes=dt_obj.minute,
seconds=dt_obj.second)
def set_system_date_time(years=None,
months=None,
days=None,
hours=None,
minutes=None,
seconds=None):
'''
Set the system date and time. Each argument is an element of the date, but
not required. If an element is not passed, the current system value for that
element will be used. For example, if you don't pass the year, the current
system year will be used. (Used by set_system_date and set_system_time)
:param int years: Years digit, ie: 2015
:param int months: Months digit: 1 - 12
:param int days: Days digit: 1 - 31
:param int hours: Hours digit: 0 - 23
:param int minutes: Minutes digit: 0 - 59
:param int seconds: Seconds digit: 0 - 59
:return: True if successful. Otherwise False.
:rtype: bool
CLI Example:
.. code-block:: bash
        salt '*' system.set_system_date_time 2015 5 12 11 37 53
'''
# Get the current date/time
try:
date_time = win32api.GetLocalTime()
except win32api.error as exc:
(number, context, message) = exc
log.error('Failed to get local time')
log.error('nbr: {0}'.format(number))
log.error('ctx: {0}'.format(context))
log.error('msg: {0}'.format(message))
return False
# Check for passed values. If not passed, use current values
if years is None:
years = date_time[0]
if months is None:
months = date_time[1]
if days is None:
days = date_time[3]
if hours is None:
hours = date_time[4]
if minutes is None:
minutes = date_time[5]
if seconds is None:
seconds = date_time[6]
try:
class SYSTEMTIME(ctypes.Structure):
_fields_ = [
('wYear', ctypes.c_int16),
('wMonth', ctypes.c_int16),
('wDayOfWeek', ctypes.c_int16),
('wDay', ctypes.c_int16),
('wHour', ctypes.c_int16),
('wMinute', ctypes.c_int16),
('wSecond', ctypes.c_int16),
('wMilliseconds', ctypes.c_int16)]
system_time = SYSTEMTIME()
system_time.wYear = int(years)
system_time.wMonth = int(months)
system_time.wDay = int(days)
system_time.wHour = int(hours)
system_time.wMinute = int(minutes)
system_time.wSecond = int(seconds)
system_time_ptr = ctypes.pointer(system_time)
succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
        return succeeded != 0
except OSError:
log.error('Failed to set local time')
return False
def get_system_date():
'''
Get the Windows system date
:return: Returns the system date.
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' system.get_system_date
'''
now = win32api.GetLocalTime()
return '{0:02d}/{1:02d}/{2:04d}'.format(now[1], now[3], now[0])
def set_system_date(newdate):
'''
Set the Windows system date. Use <mm-dd-yy> format for the date.
:param str newdate:
The date to set. Can be any of the following formats
- YYYY-MM-DD
- MM-DD-YYYY
- MM-DD-YY
- MM/DD/YYYY
- MM/DD/YY
- YYYY/MM/DD
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date '03-28-13'
'''
fmts = ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
'%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d']
# Get date/time object from newdate
dt_obj = _try_parse_datetime(newdate, fmts)
if dt_obj is None:
return False
# Set time using set_system_date_time()
return set_system_date_time(years=dt_obj.year,
months=dt_obj.month,
days=dt_obj.day)
def start_time_service():
'''
Start the Windows time service
:return: True if successful. Otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.start_time_service
'''
return __salt__['service.start']('w32time')
def stop_time_service():
'''
Stop the Windows time service
:return: True if successful. Otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.stop_time_service
'''
return __salt__['service.stop']('w32time')
def get_pending_component_servicing():
'''
Determine whether there are pending Component Based Servicing tasks that require a reboot.
:return: A boolean representing whether there are pending Component Based Servicing tasks.
:rtype: bool
.. versionadded:: Carbon
CLI Example:
.. code-block:: bash
salt '*' system.get_pending_component_servicing
'''
vname = '(Default)'
key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing\RebootPending'
reg_ret = __salt__['reg.read_value']('HKLM', key, vname)
# So long as the registry key exists, a reboot is pending.
if reg_ret['success']:
log.debug('Found key: %s', key)
return True
else:
log.debug('Unable to access key: %s', key)
return False
def get_pending_domain_join():
'''
Determine whether there is a pending domain join action that requires a reboot.
:return: A boolean representing whether there is a pending domain join action.
:rtype: bool
.. versionadded:: Carbon
CLI Example:
.. code-block:: bash
salt '*' system.get_pending_domain_join
'''
vname = '(Default)'
base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon'
avoid_key = r'{0}\AvoidSpnSet'.format(base_key)
join_key = r'{0}\JoinDomain'.format(base_key)
# If either the avoid_key or join_key is present,
# then there is a reboot pending.
avoid_reg_ret = __salt__['reg.read_value']('HKLM', avoid_key, vname)
if avoid_reg_ret['success']:
log.debug('Found key: %s', avoid_key)
return True
else:
log.debug('Unable to access key: %s', avoid_key)
join_reg_ret = __salt__['reg.read_value']('HKLM', join_key, vname)
if join_reg_ret['success']:
log.debug('Found key: %s', join_key)
return True
else:
log.debug('Unable to access key: %s', join_key)
return False
def get_pending_file_rename():
'''
Determine whether there are pending file rename operations that require a reboot.
:return: A boolean representing whether there are pending file rename operations.
:rtype: bool
.. versionadded:: Carbon
CLI Example:
.. code-block:: bash
salt '*' system.get_pending_file_rename
'''
vnames = ('PendingFileRenameOperations', 'PendingFileRenameOperations2')
key = r'SYSTEM\CurrentControlSet\Control\Session Manager'
# If any of the value names exist and have value data set,
# then a reboot is pending.
for vname in vnames:
reg_ret = __salt__['reg.read_value']('HKLM', key, vname)
if reg_ret['success']:
log.debug('Found key: %s', key)
if reg_ret['vdata'] and (reg_ret['vdata'] != '(value not set)'):
return True
else:
log.debug('Unable to access key: %s', key)
return False
def get_pending_servermanager():
'''
Determine whether there are pending Server Manager tasks that require a reboot.
:return: A boolean representing whether there are pending Server Manager tasks.
:rtype: bool
.. versionadded:: Carbon
CLI Example:
.. code-block:: bash
salt '*' system.get_pending_servermanager
'''
vname = 'CurrentRebootAttempts'
key = r'SOFTWARE\Microsoft\ServerManager'
# There are situations where it's possible to have '(value not set)' as
# the value data, and since an actual reboot wont be pending in that
# instance, just catch instances where we try unsuccessfully to cast as int.
reg_ret = __salt__['reg.read_value']('HKLM', key, vname)
if reg_ret['success']:
log.debug('Found key: %s', key)
try:
if int(reg_ret['vdata']) > 0:
return True
except ValueError:
pass
else:
log.debug('Unable to access key: %s', key)
return False
def get_pending_update():
'''
Determine whether there are pending updates that require a reboot.
:return: A boolean representing whether there are pending updates.
:rtype: bool
.. versionadded:: Carbon
CLI Example:
.. code-block:: bash
salt '*' system.get_pending_update
'''
vname = '(Default)'
key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired'
reg_ret = __salt__['reg.read_value']('HKLM', key, vname)
# So long as the registry key exists, a reboot is pending.
if reg_ret['success']:
log.debug('Found key: %s', key)
return True
else:
log.debug('Unable to access key: %s', key)
return False
MINION_VOLATILE_KEY = r'SYSTEM\CurrentControlSet\Services\salt-minion\Volatile-Data'
REBOOT_REQUIRED_NAME = 'Reboot required'
def set_reboot_required_witnessed():
r'''
.. versionadded:: Carbon
This function is used to remember that
an event indicating that a reboot is required was witnessed.
This function relies on the salt-minion's ability to create the following
volatile registry key in the *HKLM* hive:
*SYSTEM\\CurrentControlSet\\Services\\salt-minion\\Volatile-Data*
Because this registry key is volatile, it will not persist
beyond the current boot session.
Also, in the scope of this key, the name *'Reboot required'* will be
assigned the value of *1*.
    (For the time being, this function is being used
whenever an install completes with exit code 3010 and
this usage can be extended where appropriate in the future.)
:return: A boolean indicating whether or not the salt minion was
able to perform the necessary registry operations.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.set_reboot_required_witnessed
'''
return __salt__['reg.set_value'](hive='HKLM',
key=MINION_VOLATILE_KEY,
volatile=True,
vname=REBOOT_REQUIRED_NAME,
vdata=1,
vtype='REG_DWORD')
def get_reboot_required_witnessed():
'''
.. versionadded:: Carbon
This tells us if, at any time during the current boot session
the salt minion witnessed an event indicating
that a reboot is required.
(For the time being, this function will return True
if an install completed with exit code 3010 during the current
boot session and this usage can be extended where appropriate
in the future)
:return: a boolean which will be True if the salt-minion reported
a required reboot during the current boot session, otherwise False.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.get_reboot_required_witnessed
'''
value_dict = __salt__['reg.read_value'](hive='HKLM',
key=MINION_VOLATILE_KEY,
vname=REBOOT_REQUIRED_NAME)
return value_dict['vdata'] == 1
def get_pending_reboot():
'''
Determine whether there is a reboot pending.
:return: A boolean representing whether reboots are pending.
:rtype: bool
.. versionadded:: Carbon
CLI Example:
.. code-block:: bash
salt '*' system.get_pending_reboot
'''
# Order the checks for reboot pending in most to least likely.
checks = (get_pending_update, get_pending_file_rename, get_pending_servermanager,
get_pending_component_servicing,
get_reboot_required_witnessed,
get_pending_computer_name,
get_pending_domain_join)
for check in checks:
if check():
return True
return False
| python | 38,028 |
from covid.wallet.puzzles.load_clvm import load_serialized_clvm
GENERATOR_FOR_SINGLE_COIN_MOD = load_serialized_clvm("generator_for_single_coin.clvm", package_or_requirement=__name__)
| python | 185 |
#==============================#
# System Import #
#==============================#
#==============================#
# Platform Import #
#==============================#
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
#==============================#
# Class/Layer Part Import #
#==============================#
from layers.basic import Linear, LSTM
from .Initializer import Initializer
class Decoder_Flat(nn.Module):
def __init__(self, options):
super(Decoder_Flat, self).__init__()
self.init = Initializer(options['init'])
self.main = LSTM(options['main'])
self.predict = Linear(options['predict'])
def forward(self, target_emb, len_target, state_below, hidden_prev = None):
h_e, source_mask = state_below
if (hidden_prev is None) or (hidden_prev[0] is None) or (hidden_prev[1] is None):
h_d_init, c_d_init = self.init(h_e, source_mask)
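        # pack_padded_sequence expects the batch sorted by length
        # (descending); IDs is that sort order and bIDs is the inverse
        # permutation used later to restore the original batch order.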
sorted_len_target, IDs = torch.sort(len_target, descending = True)
_, bIDs = torch.sort(IDs, descending = False)
sorted_target_emb = torch.index_select(target_emb, 0, IDs)
packed_target_emb = pack_padded_sequence(sorted_target_emb, [int(it) for it in list(sorted_len_target)], batch_first = True)
if (hidden_prev is None) or (hidden_prev[0] is None) or (hidden_prev[1] is None):
h_d_init = torch.index_select(h_d_init, 0, IDs)
c_d_init = torch.index_select(c_d_init, 0, IDs)
if hidden_prev is None:
h_d, (ph_d, pc_d) = self.main(packed_target_emb, (h_d_init, c_d_init))
else:
if hidden_prev[0] is None:
hidden_prev = (h_d_init, hidden_prev[1])
if hidden_prev[1] is None:
hidden_prev = (hidden_prev[0], c_d_init)
h_d, (ph_d, pc_d) = self.main(packed_target_emb, hidden_prev)
h_d = pad_packed_sequence(h_d, batch_first=True)
h_d = torch.index_select(h_d[0], 0, bIDs)
ph_d = torch.index_select(ph_d, 1, bIDs)
pc_d = torch.index_select(pc_d, 1, bIDs)
pred = F.softmax(self.predict(h_d), dim = 2)
return [pred], [ph_d, pc_d]
| python | 2,349 |
ten_things = "Apples Oranges Crows Telephone Light Sugar"
print ("Wait, there aren't 10 things in that list, let's fix that.")
stuff = ten_things.split(' ')
more_stuff = ["Day", "Night", "Song", "Frisbee", "Corn", "Banana", "Girl", "Boy"]
while len(stuff) != 10:
next_one = more_stuff.pop()
print ("Adding: ", next_one)
stuff.append(next_one)
print ("There's %d items now." % len(stuff))
print ("There we go: ", stuff)
print ("Let's do some things with stuff.")
print (stuff[1])
print (stuff[-1]) # whoa! fancy
print (stuff.pop())
print (' '.join(stuff)) # what? cool
print ('#'.join(stuff[3:5])) # super stellar!
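# stuff[3:5] is a two-item slice, so the join puts a single '#' between them.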
| python | 647 |
import os
import zipfile
import rarfile
import sys
from send2trash import send2trash
from wand.image import Image
# global variable definitions
if sys.platform == 'win32':
# Set to full path of unrar.exe if it is not in PATH
rarfile.UNRAR_TOOL = "C:\\Program Files\\WinRAR\\UnRAR.exe"
    # base directory definition
base_dir = 'D:\\comics\\modificaciones'
else:
# Set to full path of unrar.exe if it is not in PATH
rarfile.UNRAR_TOOL = '/usr/bin/unrar'
base_dir = '/media/randall/Datos/comics/modificaciones'
dir_modificados = os.path.join(base_dir,'modificados')
# desired page width
ancho = 1280
s_ancho = str(ancho) + 'x'
doble_ancho = ancho * 2
s_doble_ancho = str(doble_ancho) + 'x'
factor_compresion = 85
# function to determine the file type
def Tipo_Archivo (archivo):
    # file type signatures (magic bytes)
_RAR5_ID = b"Rar!\x1a\x07\x01\x00"
_RAR_ID = b"Rar!\x1a\x07\x00"
_ZIP_ID = b"PK"
_7Z_ID = b"7z\xbc\xaf\x27\x1c"
    # open the file to read its first bytes
with open(archivo, 'rb') as f:
buf = f.read(len(_RAR5_ID))
    # determine the type
if buf.startswith(_RAR_ID):
tipo = 'RAR'
elif buf.startswith(_RAR5_ID):
tipo = 'RAR'
elif buf.startswith(_ZIP_ID):
tipo = 'ZIP'
elif buf.startswith(_7Z_ID):
tipo = '7Z'
else:
tipo = 'XX'
    # return the value
return tipo
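# Note: a .cbz that was renamed to .cbr still starts with b"PK", so
# Tipo_Archivo returns 'ZIP' for it and the file is handled correctly
# regardless of its extension.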
# process the .jpg
def procesar_imagen (archivo):
(_,extension) = os.path.splitext(archivo)
    # only .jpg files; ignore everything else
if extension.lower() in ('.jpg', '.jpeg'):
with Image(filename = archivo, resolution = 72) as img:
(width, height) = img.size
            # check that it is not a double-page spread
if width < height:
                # if the page is already narrower than the target, do nothing
if width > ancho:
img.transform(resize = s_ancho)
else:
if width > doble_ancho:
img.transform(resize = s_doble_ancho)
            # shrink the file size
img.strip()
img.interlace_scheme = 'plane'
img.compression_quality = factor_compresion
img.save(filename = archivo)
def procesar_archivo (archivo):
(nombre,_) = os.path.splitext(archivo)
    # list to hold the files inside the archive
lista_archivos = []
    # create a directory to extract into
if not os.path.exists (nombre):
os.makedirs(nombre)
    # determine the file type
    # not done by extension because files are sometimes named .cbr
    # when they are actually .cbz inside
tipo_archivo = Tipo_Archivo (archivo)
    # if it is a .zip or a .cbz, create a directory with the base name
    # and extract the files it contains
if tipo_archivo == 'ZIP':
zip = zipfile.ZipFile(archivo)
# primero descomprime todo
zip.extractall(path = nombre)
# luego recorre el contenido del .zip y procesa las imagenes
os.chdir (nombre)
for archivo_imagen in zip.namelist():
lista_archivos.append(os.path.sep.join(archivo_imagen.split('/')))
procesar_imagen (os.path.sep.join(archivo_imagen.split('/')))
zip.close()
# si es un .rar o un .cbr, crea un directorio con el nombre y
# extrae los archivos que contiene
elif tipo_archivo == 'RAR':
rar = rarfile.RarFile(archivo)
# primero descomprime todo
rar.extractall(path = nombre)
# luego recorre el contenido del .zip y procesa las imagenes
os.chdir (nombre)
for archivo_imagen in rar.namelist():
lista_archivos.append(os.path.sep.join(archivo_imagen.split('/')))
procesar_imagen (os.path.sep.join(archivo_imagen.split('/')))
rar.close()
elif tipo_archivo == '7Z':
print('El archivo es un 7z - no se puede procesar')
else:
print('No es posible determinar el tipo de archivo')
# crear el nuevo zip con las imagenes modificadas
new_zip = zipfile.ZipFile(os.path.join(dir_modificados,nombre) + '.cbz', 'w')
for nuevo_archivo in lista_archivos:
new_zip.write(nuevo_archivo, compress_type=zipfile.ZIP_DEFLATED)
new_zip.close()
# se cambia al directorio principal y elimina el directorio
# donde se descomprimieron las imagenes
os.chdir(base_dir)
send2trash(nombre)
def main():
    # start
    list_files = []
    if os.path.exists (base_dir):
        os.chdir(base_dir)
        # if the destination directory does not exist, create it
        if not os.path.exists (dir_modificados):
            os.makedirs (dir_modificados)
        # walk the base directory looking for comic files
        for archivo in os.listdir():
            (_,extension) = os.path.splitext(archivo)
            if (extension in ('.cbz','.zip','.cbr','.rar')):
                list_files.append(archivo)
        # process the comic files
        for file in list_files:
            try:
                print ("Processing file " + file)
                procesar_archivo (file)
            except Exception as e:
                print ("Error in file " + file + ": " + str(e))
if __name__ == '__main__':
main()
| python | 5,239 |
# Generated by Django 3.0.8 on 2020-08-06 12:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Search',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('query', models.CharField(max_length=100, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='SearchResult',
fields=[
('id', models.CharField(max_length=20, primary_key=True, serialize=False)),
('title', models.CharField(max_length=100)),
('htmlTitle', models.CharField(max_length=125)),
('link', models.CharField(max_length=150)),
('snippet', models.CharField(max_length=300)),
],
),
migrations.CreateModel(
name='SearchMeta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('page', models.IntegerField()),
('result', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='engine.SearchResult')),
('search', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='engine.Search')),
],
),
migrations.AddField(
model_name='search',
name='results',
field=models.ManyToManyField(related_name='search', through='engine.SearchMeta', to='engine.SearchResult'),
),
]
| python | 1,784 |
from conans import CMake, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
import textwrap
required_conan_version = ">=1.33.0"
class OpenALConan(ConanFile):
name = "openal"
description = "OpenAL Soft is a software implementation of the OpenAL 3D audio API."
topics = ("conan", "openal", "audio", "api")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.openal.org"
license = "MIT"
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
@property
def _supports_cxx14(self):
if self.settings.compiler == "clang" and self.settings.compiler.libcxx in ("libstdc++", "libstdc++11"):
if tools.Version(self.settings.compiler.version) < "9":
return False, "openal on clang {} cannot be built with stdlibc++(11) c++ runtime".format(self.settings.compiler.version)
min_version = {
"Visual Studio": "15",
"gcc": "5",
"clang": "5",
}.get(str(self.settings.compiler))
if min_version:
if tools.Version(self.settings.compiler.version) < min_version:
return False, "This compiler version does not support c++14"
else:
return True, "Unknown compiler. Assuming your compiler supports c++14"
return True, None
@property
def _supports_cxx11(self):
if self.settings.compiler == "clang" and self.settings.compiler.libcxx in ("libstdc++", "libstdc++11"):
if tools.Version(self.settings.compiler.version) < "9":
return False, "openal on clang {} cannot be built with stdlibc++(11) c++ runtime".format(self.settings.compiler.version)
min_version = {
"Visual Studio": "13",
"gcc": "5",
"clang": "5",
}.get(str(self.settings.compiler))
if min_version:
if tools.Version(self.settings.compiler.version) < min_version:
return False, "This compiler version does not support c++11"
else:
return True, "Unknown compiler. Assuming your compiler supports c++11"
return True, None
@property
def _openal_cxx_backend(self):
return tools.Version(self.version) >= "1.20"
def configure(self):
if self.options.shared:
del self.options.fPIC
# OpenAL's API is pure C, thus the c++ standard does not matter
# Because the backend is C++, the C++ STL matters
del self.settings.compiler.cppstd
if not self._openal_cxx_backend:
del self.settings.compiler.libcxx
if tools.Version(self.version) >= "1.21":
ok, msg = self._supports_cxx14
if not ok:
raise ConanInvalidConfiguration(msg)
if msg:
self.output.warn(msg)
elif tools.Version(self.version) >= "1.20":
ok, msg = self._supports_cxx11
if not ok:
raise ConanInvalidConfiguration(msg)
if msg:
self.output.warn(msg)
def requirements(self):
if self.settings.os == "Linux":
self.requires("libalsa/1.2.4")
def source(self):
tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["LIBTYPE"] = "SHARED" if self.options.shared else "STATIC"
self._cmake.definitions["ALSOFT_UTILS"] = False
self._cmake.definitions["ALSOFT_EXAMPLES"] = False
self._cmake.definitions["ALSOFT_TESTS"] = False
self._cmake.definitions["CMAKE_DISABLE_FIND_PACKAGE_SoundIO"] = True
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("COPYING", dst="licenses", src=self._source_subfolder)
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
self._create_cmake_module_variables(
os.path.join(self.package_folder, self._module_file_rel_path)
)
@staticmethod
def _create_cmake_module_variables(module_file):
content = textwrap.dedent("""\
if(DEFINED OpenAL_FOUND)
set(OPENAL_FOUND ${OpenAL_FOUND})
endif()
if(DEFINED OpenAL_INCLUDE_DIR)
set(OPENAL_INCLUDE_DIR ${OpenAL_INCLUDE_DIR})
endif()
if(DEFINED OpenAL_LIBRARIES)
set(OPENAL_LIBRARY ${OpenAL_LIBRARIES})
endif()
if(DEFINED OpenAL_VERSION)
set(OPENAL_VERSION_STRING ${OpenAL_VERSION})
endif()
""")
tools.save(module_file, content)
@property
def _module_subfolder(self):
return os.path.join("lib", "cmake")
@property
def _module_file_rel_path(self):
return os.path.join(self._module_subfolder,
"conan-official-{}-variables.cmake".format(self.name))
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "OpenAL"
self.cpp_info.names["cmake_find_package_multi"] = "OpenAL"
self.cpp_info.names["pkg_config"] = "openal"
self.cpp_info.builddirs.append(self._module_subfolder)
self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.includedirs.append(os.path.join("include", "AL"))
if self.settings.os == "Linux":
self.cpp_info.system_libs.extend(["dl", "m"])
elif self.settings.os == "Macos":
self.cpp_info.frameworks.extend(["AudioToolbox", "CoreAudio", "CoreFoundation"])
elif self.settings.os == "Windows":
self.cpp_info.system_libs.extend(["winmm", "ole32", "shell32", "User32"])
if self._openal_cxx_backend:
libcxx = tools.stdcpp_library(self)
if libcxx:
self.cpp_info.system_libs.append(libcxx)
if not self.options.shared:
self.cpp_info.defines.append("AL_LIBTYPE_STATIC")
| python | 7,092 |
"""
GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
Parser classes and rules for parsing C++ classes.
Author: Duy Nguyen Ta, Fan Jiang, Matthew Sklar, Varun Agrawal, and Frank Dellaert
"""
from typing import Iterable, List, Union
from pyparsing import Literal, Optional, ZeroOrMore
from .enum import Enum
from .function import ArgumentList, ReturnType
from .template import Template
from .tokens import (CLASS, COLON, CONST, IDENT, LBRACE, LPAREN, OPERATOR,
RBRACE, RPAREN, SEMI_COLON, STATIC, VIRTUAL)
from .type import TemplatedType, Typename
from .utils import collect_namespaces
from .variable import Variable
class Method:
"""
Rule to parse a method in a class.
E.g.
```
class Hello {
void sayHello() const;
};
```
"""
rule = (
Optional(Template.rule("template")) #
+ ReturnType.rule("return_type") #
+ IDENT("name") #
+ LPAREN #
+ ArgumentList.rule("args_list") #
+ RPAREN #
+ Optional(CONST("is_const")) #
+ SEMI_COLON # BR
).setParseAction(lambda t: Method(t.template, t.name, t.return_type, t.
args_list, t.is_const))
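    # Illustrative match for this rule (hypothetical method, not from GTSAM):
    #   "double norm(const Point2& p) const;"
    # yields template='', return_type=double, name='norm',
    # args_list=(const Point2& p), is_const='const'.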
def __init__(self,
template: str,
name: str,
return_type: ReturnType,
args: ArgumentList,
is_const: str,
parent: Union[str, "Class"] = ''):
self.template = template
self.name = name
self.return_type = return_type
self.args = args
self.is_const = is_const
self.parent = parent
def __repr__(self) -> str:
return "Method: {} {} {}({}){}".format(
self.template,
self.return_type,
self.name,
self.args,
self.is_const,
)
class StaticMethod:
"""
Rule to parse all the static methods in a class.
E.g.
```
class Hello {
static void changeGreeting();
};
```
"""
rule = (
STATIC #
+ ReturnType.rule("return_type") #
+ IDENT("name") #
+ LPAREN #
+ ArgumentList.rule("args_list") #
+ RPAREN #
+ SEMI_COLON # BR
).setParseAction(
lambda t: StaticMethod(t.name, t.return_type, t.args_list))
def __init__(self,
name: str,
return_type: ReturnType,
args: ArgumentList,
parent: Union[str, "Class"] = ''):
self.name = name
self.return_type = return_type
self.args = args
self.parent = parent
def __repr__(self) -> str:
return "static {} {}{}".format(self.return_type, self.name, self.args)
def to_cpp(self) -> str:
"""Generate the C++ code for wrapping."""
return self.name
class Constructor:
"""
Rule to parse the class constructor.
Can have 0 or more arguments.
"""
rule = (
IDENT("name") #
+ LPAREN #
+ ArgumentList.rule("args_list") #
+ RPAREN #
+ SEMI_COLON # BR
).setParseAction(lambda t: Constructor(t.name, t.args_list))
def __init__(self,
name: str,
args: ArgumentList,
parent: Union["Class", str] = ''):
self.name = name
self.args = args
self.parent = parent
def __repr__(self) -> str:
return "Constructor: {}".format(self.name)
class Operator:
"""
Rule for parsing operator overloads.
E.g.
```
class Overload {
Vector2 operator+(const Vector2 &v) const;
};
"""
rule = (
ReturnType.rule("return_type") #
+ Literal("operator")("name") #
+ OPERATOR("operator") #
+ LPAREN #
+ ArgumentList.rule("args_list") #
+ RPAREN #
+ CONST("is_const") #
+ SEMI_COLON # BR
).setParseAction(lambda t: Operator(t.name, t.operator, t.return_type, t.
args_list, t.is_const))
def __init__(self,
name: str,
operator: str,
return_type: ReturnType,
args: ArgumentList,
is_const: str,
parent: Union[str, "Class"] = ''):
self.name = name
self.operator = operator
self.return_type = return_type
self.args = args
self.is_const = is_const
self.is_unary = len(args) == 0
self.parent = parent
# Check for valid unary operators
if self.is_unary and self.operator not in ('+', '-'):
raise ValueError("Invalid unary operator {} used for {}".format(
self.operator, self))
# Check that number of arguments are either 0 or 1
assert 0 <= len(args) < 2, \
"Operator overload should be at most 1 argument, " \
"{} arguments provided".format(len(args))
# Check to ensure arg and return type are the same.
if len(args) == 1 and self.operator not in ("()", "[]"):
assert args.args_list[0].ctype.typename.name == return_type.type1.typename.name, \
"Mixed type overloading not supported. Both arg and return type must be the same."
def __repr__(self) -> str:
return "Operator: {}{}{}({}) {}".format(
self.return_type,
self.name,
self.operator,
self.args,
self.is_const,
)
class Class:
"""
Rule to parse a class defined in the interface file.
E.g.
```
class Hello {
...
};
```
"""
class Members:
"""
Rule for all the members within a class.
"""
rule = ZeroOrMore(Constructor.rule #
^ StaticMethod.rule #
^ Method.rule #
^ Variable.rule #
^ Operator.rule #
^ Enum.rule #
).setParseAction(lambda t: Class.Members(t.asList()))
def __init__(self,
members: List[Union[Constructor, Method, StaticMethod,
Variable, Operator]]):
self.ctors = []
self.methods = []
self.static_methods = []
self.properties = []
self.operators = []
self.enums = []
for m in members:
if isinstance(m, Constructor):
self.ctors.append(m)
elif isinstance(m, Method):
self.methods.append(m)
elif isinstance(m, StaticMethod):
self.static_methods.append(m)
elif isinstance(m, Variable):
self.properties.append(m)
elif isinstance(m, Operator):
self.operators.append(m)
elif isinstance(m, Enum):
self.enums.append(m)
_parent = COLON + (TemplatedType.rule ^ Typename.rule)("parent_class")
rule = (
Optional(Template.rule("template")) #
+ Optional(VIRTUAL("is_virtual")) #
+ CLASS #
+ IDENT("name") #
+ Optional(_parent) #
+ LBRACE #
+ Members.rule("members") #
+ RBRACE #
+ SEMI_COLON # BR
).setParseAction(lambda t: Class(
t.template,
t.is_virtual,
t.name,
t.parent_class,
t.members.ctors,
t.members.methods,
t.members.static_methods,
t.members.properties,
t.members.operators,
t.members.enums
))
def __init__(
self,
template: Template,
is_virtual: str,
name: str,
parent_class: list,
ctors: List[Constructor],
methods: List[Method],
static_methods: List[StaticMethod],
properties: List[Variable],
operators: List[Operator],
enums: List[Enum],
parent: str = '',
):
self.template = template
self.is_virtual = is_virtual
self.name = name
if parent_class:
# If it is in an iterable, extract the parent class.
if isinstance(parent_class, Iterable):
parent_class = parent_class[0]
# If the base class is a TemplatedType,
# we want the instantiated Typename
if isinstance(parent_class, TemplatedType):
parent_class = parent_class.typename
self.parent_class = parent_class
else:
self.parent_class = ''
self.ctors = ctors
self.methods = methods
self.static_methods = static_methods
self.properties = properties
self.operators = operators
self.enums = enums
self.parent = parent
# Make sure ctors' names and class name are the same.
for ctor in self.ctors:
if ctor.name != self.name:
raise ValueError("Error in constructor name! {} != {}".format(
ctor.name, self.name))
for ctor in self.ctors:
ctor.parent = self
for method in self.methods:
method.parent = self
for static_method in self.static_methods:
static_method.parent = self
for _property in self.properties:
_property.parent = self
def namespaces(self) -> list:
"""Get the namespaces which this class is nested under as a list."""
return collect_namespaces(self)
def __repr__(self):
return "Class: {self.name}".format(self=self)
| python | 9,797 |
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import webob
from webob import exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import exception
from cinder.i18n import _LI
from cinder import volume
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('snapshot', 'snapshot_unmanage')
class SnapshotUnmanageController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SnapshotUnmanageController, self).__init__(*args, **kwargs)
self.volume_api = volume.API()
@wsgi.response(202)
@wsgi.action('os-unmanage')
def unmanage(self, req, id, body):
"""Stop managing a snapshot.
This action is very much like a delete, except that a different
method (unmanage) is called on the Cinder driver. This has the effect
of removing the snapshot from Cinder management without actually
removing the backend storage object associated with it.
There are no required parameters.
A Not Found error is returned if the specified snapshot does not exist.
"""
context = req.environ['cinder.context']
authorize(context)
LOG.info(_LI("Unmanage snapshot with id: %s"), id, context=context)
try:
snapshot = self.volume_api.get_snapshot(context, id)
self.volume_api.delete_snapshot(context, snapshot,
unmanage_only=True)
except exception.SnapshotNotFound as ex:
raise exc.HTTPNotFound(explanation=ex.msg)
except exception.InvalidSnapshot as ex:
raise exc.HTTPBadRequest(explanation=ex.msg)
return webob.Response(status_int=202)
class Snapshot_unmanage(extensions.ExtensionDescriptor):
"""Enable volume unmanage operation."""
name = "SnapshotUnmanage"
alias = "os-snapshot-unmanage"
namespace = ('http://docs.openstack.org/snapshot/ext/snapshot-unmanage'
'/api/v1')
updated = "2014-12-31T00:00:00+00:00"
def get_controller_extensions(self):
controller = SnapshotUnmanageController()
extension = extensions.ControllerExtension(self, 'snapshots',
controller)
return [extension]
| python | 2,898 |
"""Sensor support for Skybell Doorbells."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ENTITY_NAMESPACE, CONF_MONITORED_CONDITIONS
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .entity import DOMAIN, SkybellEntity
SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="chime_level",
name="Chime Level",
icon="mdi:bell-ring",
),
)
MONITORED_CONDITIONS = SENSOR_TYPES
# Deprecated in Home Assistant 2022.6
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_ENTITY_NAMESPACE, default=DOMAIN): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]
),
}
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Skybell sensor."""
async_add_entities(
SkybellSensor(coordinator, description)
for coordinator in hass.data[DOMAIN][entry.entry_id]
for description in SENSOR_TYPES
)
class SkybellSensor(SkybellEntity, SensorEntity):
"""A sensor implementation for Skybell devices."""
@property
def native_value(self) -> int:
"""Return the state of the sensor."""
return self._device.outdoor_chime_level
| python | 1,679 |
# from recourse.path import *
import os
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
def load_credit_data():
# input vars
data_name = 'credit'
raw_data_file = os.path.join(os.path.dirname(__file__), 'credit_raw.csv')
processed_file = os.path.join(os.path.dirname(__file__), 'credit_processed.csv')
##### Credit Data Processing
raw_df = pd.read_csv(raw_data_file, index_col = 0)
processed_df = pd.DataFrame()
# convert NTD to USD using spot rate in 09-2005
NTD_to_USD = 32.75 # see https://www.poundsterlinglive.com/bank-of-england-spot/historical-spot-exchange-rates/usd/USD-to-TWD-2005
monetary_features = list(filter(lambda x: ('BILL_AMT' in x) or ('PAY_AMT' in x) or ('LIMIT_BAL' in x), raw_df.columns))
raw_df[monetary_features] = raw_df[monetary_features].applymap(lambda x: x / NTD_to_USD).round(-1).astype(int)
# outcome variable in first column
processed_df['NoDefaultNextMonth (label)'] = 1 - raw_df['default payment next month (label)']
# Gender (old; male = 1, female = 2) --> (new; male = 0, female = 1)
# <Removed by Berk> processed_df['Female'] = raw_df['SEX'] == 2
processed_df.loc[raw_df['SEX'] == 1, 'isMale'] = True
processed_df.loc[raw_df['SEX'] == 2, 'isMale'] = False
# Married (old; married = 1; single = 2; other = 3) --> (new; married = 1; single = 2; other = 3)
# <Removed by Amir> processed_df['Married'] = raw_df['MARRIAGE'] == 1
# <Removed by Amir> processed_df['Single'] = raw_df['MARRIAGE'] == 2
processed_df.loc[raw_df.MARRIAGE == 1, 'isMarried'] = True # married (use T/F, but not 1/0, so that some values become NAN and can be dropped later!)
processed_df.loc[raw_df.MARRIAGE == 2, 'isMarried'] = False # single (use T/F, but not 1/0, so that some values become NAN and can be dropped later!)
# <Set to NAN by Amir> processed_df.loc[raw_df.MARRIAGE == 0, 'isMarried'] = 3 # other
# <Set to NAN by Amir> processed_df.loc[raw_df.MARRIAGE == 3, 'isMarried'] = 3 # other
# Age
# <Removed by Amir> processed_df['Age_lt_25'] = raw_df['AGE'] < 25
# <Removed by Amir> processed_df['Age_in_25_to_40'] = raw_df['AGE'].between(25, 40, inclusive = True)
# <Removed by Amir> processed_df['Age_in_40_to_59'] = raw_df['AGE'].between(40, 59, inclusive = True)
# <Removed by Amir> processed_df['Age_geq_60'] = raw_df['AGE'] >= 60
processed_df.loc[raw_df['AGE'] < 25, 'AgeGroup'] = 1
processed_df.loc[raw_df['AGE'].between(25, 40, inclusive = True), 'AgeGroup'] = 2
processed_df.loc[raw_df['AGE'].between(40, 59, inclusive = True), 'AgeGroup'] = 3
processed_df.loc[raw_df['AGE'] >= 60, 'AgeGroup'] = 4
# EducationLevel (currently, 1 = graduate school; 2 = university; 3 = high school; 4 = others)
processed_df['EducationLevel'] = 1
processed_df['EducationLevel'][raw_df['EDUCATION'] == 3] = 2 # HS
processed_df['EducationLevel'][raw_df['EDUCATION'] == 2] = 3 # University
processed_df['EducationLevel'][raw_df['EDUCATION'] == 1] = 4 # Graduate
# Process Bill Related Variables
pay_columns = ['PAY_AMT1', 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6']
bill_columns = ['BILL_AMT1', 'BILL_AMT2', 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6']
#processed_df['LastBillAmount'] = np.maximum(raw_df['BILL_AMT1'], 0)
processed_df['MaxBillAmountOverLast6Months'] = np.maximum(raw_df[bill_columns].max(axis = 1), 0)
processed_df['MaxPaymentAmountOverLast6Months'] = np.maximum(raw_df[pay_columns].max(axis = 1), 0)
processed_df['MonthsWithZeroBalanceOverLast6Months'] = np.sum(np.greater(raw_df[pay_columns].values, raw_df[bill_columns].values), axis = 1)
processed_df['MonthsWithLowSpendingOverLast6Months'] = np.sum(raw_df[bill_columns].div(raw_df['LIMIT_BAL'], axis = 0) < 0.20, axis = 1)
processed_df['MonthsWithHighSpendingOverLast6Months'] = np.sum(raw_df[bill_columns].div(raw_df['LIMIT_BAL'], axis = 0) > 0.80, axis = 1)
processed_df['MostRecentBillAmount'] = np.maximum(raw_df[bill_columns[0]], 0)
processed_df['MostRecentPaymentAmount'] = np.maximum(raw_df[pay_columns[0]], 0)
# Credit History
    # PAY_M = months since last payment (as recorded M months ago);
    # e.g. PAY_6 = months since last payment as recorded 6 months ago
    # PAY_M = -1 if paid duly in month M
    # PAY_M = -2 if the customer was issued a refund in month M
raw_df = raw_df.rename(columns = {'PAY_0': 'MonthsOverdue_1',
'PAY_2': 'MonthsOverdue_2',
'PAY_3': 'MonthsOverdue_3',
'PAY_4': 'MonthsOverdue_4',
'PAY_5': 'MonthsOverdue_5',
'PAY_6': 'MonthsOverdue_6'})
overdue = ['MonthsOverdue_%d' % j for j in range(1, 7)]
raw_df[overdue] = raw_df[overdue].replace(to_replace = [-2, -1], value = [0, 0])
overdue_history = raw_df[overdue].to_numpy() > 0
payment_history = np.logical_not(overdue_history)
def count_zero_streaks(a):
#adapted from zero_runs function of https://stackoverflow.com/a/24892274/568249
iszero = np.concatenate(([0], np.equal(a, 0).view(np.int8), [0]))
absdiff = np.abs(np.diff(iszero))
runs = np.where(absdiff == 1)[0].reshape(-1, 2)
n_streaks = runs.shape[0]
#streak_lengths = np.sum(runs[:,1] - runs[:,0])
return n_streaks
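    # Worked example for count_zero_streaks (sketch): for a = [1, 0, 0, 1, 0, 1]
    # the zero runs span indices (1, 3) and (4, 5), so the count is 2. Applied
    # to payment_history below, each zero streak is one stretch of consecutive
    # overdue months.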
overdue_counts = np.repeat(np.nan, len(raw_df))
n_overdue_months = np.sum(overdue_history > 0, axis = 1)
overdue_counts[n_overdue_months == 0] = 0 # count_zero_streaks doesn't work for edge cases
overdue_counts[n_overdue_months == 6] = 1
for k in range(1, len(overdue)):
idx = n_overdue_months == k
overdue_counts[idx] = [count_zero_streaks(a) for a in payment_history[idx, :]]
overdue_counts = overdue_counts.astype(np.int_)
processed_df['TotalOverdueCounts'] = overdue_counts
processed_df['TotalMonthsOverdue'] = raw_df[overdue].sum(axis = 1)
processed_df['HasHistoryOfOverduePayments'] = raw_df[overdue].sum(axis = 1) > 0
# Save to CSV
processed_df = processed_df + 0 # convert boolean values to numeric
processed_df = processed_df.reset_index(drop = True)
processed_df = processed_df.dropna() # drop all rows that include NAN (some exist in isMarried column, possibly elsewhere as well)
processed_df.to_csv(processed_file, header = True, index = False)
assert(processed_df.shape[0] == 29623)
return processed_df.astype('float64')
| python | 6,397 |
"""
1752. Check if Array Is Sorted and Rotated
Easy
Given an array nums, return true if the array was originally sorted in non-decreasing order, then rotated some number of positions (including zero). Otherwise, return false.
There may be duplicates in the original array.
Note: An array A rotated by x positions results in an array B of the same length such that A[i] == B[(i+x) % A.length], where % is the modulo operation.
Example 1:
Input: nums = [3,4,5,1,2]
Output: true
Explanation: [1,2,3,4,5] is the original sorted array.
You can rotate the array by x = 3 positions to begin on the element of value 3: [3,4,5,1,2].
Example 2:
Input: nums = [2,1,3,4]
Output: false
Explanation: There is no sorted array once rotated that can make nums.
Example 3:
Input: nums = [1,2,3]
Output: true
Explanation: [1,2,3] is the original sorted array.
You can rotate the array by x = 0 positions (i.e. no rotation) to make nums.
Example 4:
Input: nums = [1,1,1]
Output: true
Explanation: [1,1,1] is the original sorted array.
You can rotate any number of positions to make nums.
Example 5:
Input: nums = [2,1]
Output: true
Explanation: [1,2] is the original sorted array.
You can rotate the array by x = 5 positions to begin on the element of value 2: [2,1].
Constraints:
1 <= nums.length <= 100
1 <= nums[i] <= 100
"""
from typing import List


class Solution:
    def check(self, nums: List[int]) -> bool:
        # count the "drops"; i = 0 wraps around and compares against nums[-1]
        return sum(nums[i] < nums[i-1] for i in range(len(nums))) <= 1
 | python | 1,461 |
from unittest import TestCase, main
from day23_feature.solution import solution
class MyTestCase(TestCase):
def test_something(self):
self.assertEqual([2, 1], solution([93, 30, 55], [1, 30, 5]))
if __name__ == '__main__':
main()
| python | 249 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetApiVersionSetResult',
'AwaitableGetApiVersionSetResult',
'get_api_version_set',
]
@pulumi.output_type
class GetApiVersionSetResult:
"""
Api Version Set Contract details.
"""
def __init__(__self__, description=None, display_name=None, id=None, name=None, type=None, version_header_name=None, version_query_name=None, versioning_scheme=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if version_header_name and not isinstance(version_header_name, str):
raise TypeError("Expected argument 'version_header_name' to be a str")
pulumi.set(__self__, "version_header_name", version_header_name)
if version_query_name and not isinstance(version_query_name, str):
raise TypeError("Expected argument 'version_query_name' to be a str")
pulumi.set(__self__, "version_query_name", version_query_name)
if versioning_scheme and not isinstance(versioning_scheme, str):
raise TypeError("Expected argument 'versioning_scheme' to be a str")
pulumi.set(__self__, "versioning_scheme", versioning_scheme)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description of API Version Set.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
Name of API Version Set
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="versionHeaderName")
def version_header_name(self) -> Optional[str]:
"""
Name of HTTP header parameter that indicates the API Version if versioningScheme is set to `header`.
"""
return pulumi.get(self, "version_header_name")
@property
@pulumi.getter(name="versionQueryName")
def version_query_name(self) -> Optional[str]:
"""
Name of query parameter that indicates the API Version if versioningScheme is set to `query`.
"""
return pulumi.get(self, "version_query_name")
@property
@pulumi.getter(name="versioningScheme")
def versioning_scheme(self) -> str:
"""
An value that determines where the API Version identifier will be located in a HTTP request.
"""
return pulumi.get(self, "versioning_scheme")
class AwaitableGetApiVersionSetResult(GetApiVersionSetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApiVersionSetResult(
description=self.description,
display_name=self.display_name,
id=self.id,
name=self.name,
type=self.type,
version_header_name=self.version_header_name,
version_query_name=self.version_query_name,
versioning_scheme=self.versioning_scheme)
def get_api_version_set(resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
version_set_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiVersionSetResult:
"""
Api Version Set Contract details.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
:param str version_set_id: Api Version Set identifier. Must be unique in the current API Management service instance.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
__args__['versionSetId'] = version_set_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20201201:getApiVersionSet', __args__, opts=opts, typ=GetApiVersionSetResult).value
return AwaitableGetApiVersionSetResult(
description=__ret__.description,
display_name=__ret__.display_name,
id=__ret__.id,
name=__ret__.name,
type=__ret__.type,
version_header_name=__ret__.version_header_name,
version_query_name=__ret__.version_query_name,
versioning_scheme=__ret__.versioning_scheme)
| python | 5,937 |
"""
MIT License
Copyright (c) 2021 GamingGeek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from .constants import ConfigOpt, DISCORD_CONVERTERS
from .errors import *
import discord
import inspect
import json
options = dict()
class Config:
__slots__ = ('options', 'loaded', '_bot', '_guild', '_db', '_data')
def __init__(self, guild, **kwargs):
self.options = options
self.loaded: bool = False
self._bot = kwargs.pop('bot')
self._guild = self._bot.get_guild(guild) or guild
self._db = kwargs.pop('db')
self._data = {}
@ConfigOpt(name='main.prefix', accepts=str, default='$', options=options)
async def prefix(self, value: str):
'''Prefix | The prefix used before all Fire commands'''
self._bot.logger.info(
f'$GREENSetting $CYANmain.prefix $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('main.prefix', value)
@ConfigOpt(name='main.fetch_offline', accepts=bool, default=True, hidden=True, options=options)
async def fetch_offline(self, value: bool):
'''Fetch Offline | Manually set by Geek#8405 for larger guilds that do not need all members cached'''
self._bot.logger.info(
f'$GREENSetting $CYANmain.fetch_offline $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('main.fetch_offline', value)
@ConfigOpt(name='log.moderation', accepts=discord.TextChannel, default=None, options=options)
async def mod_logs(self, value: discord.TextChannel):
'''Moderation Logs | The channel where moderation actions are logged'''
self._bot.logger.info(
f'$GREENSetting $CYANlog.moderation $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('log.moderation', str(value.id))
@ConfigOpt(name='log.action', accepts=discord.TextChannel, default=None, options=options)
async def action_logs(self, value: discord.TextChannel):
'''Action Logs | The channel where miscellaneous actions are logged, e.g. deleted messages'''
self._bot.logger.info(
f'$GREENSetting $CYANlog.action $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('log.action', str(value.id))
@ConfigOpt(name='mod.mutedrole', accepts=discord.Role, default=None, options=options)
async def muted_role(self, value: discord.Role):
'''Muted Role | The role which will be used when muting a user. If not set, it will default to a role called "Muted"'''
self._bot.logger.info(
f'$GREENSetting $CYANmod.mutedrole $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('mod.mutedrole', str(value.id))
    @ConfigOpt(name='mod.antieveryone', accepts=bool, default=None, options=options)
    async def anti_everyone(self, value: bool):
        '''Anti Everyone | Prevents those without permission from sending messages containing @everyone (as they're most likely just advertising, sending copypasta etc.)'''
        self._bot.logger.info(
            f'$GREENSetting $CYANmod.antieveryone $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
        await self.update('mod.antieveryone', value)
@ConfigOpt(name='mod.linkfilter', accepts=[str], default=[], options=options)
async def link_filter(self, value: list):
'''Link Filter | The filters of which any links found will be deleted (unless they have Manage Messages)'''
return
@ConfigOpt(name='excluded.filter', accepts=[str], default=[], options=options)
async def filter_exclude(self, value: list):
'''Filter Exclusion | Channel, role and user IDs that are excluded from link filters and duplicate message deletion'''
return
@ConfigOpt(name='mod.globalbans', accepts=bool, default=False, options=options)
async def global_bans(self, value: bool):
'''Global Bans | Global ban checking on member join, powered by KSoft.Si API'''
self._bot.logger.info(
f'$GREENSetting $CYANmod.globalbans $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('mod.globalbans', value)
@ConfigOpt(name='mod.autodecancer', accepts=bool, default=False, options=options)
async def auto_decancer(self, value: bool):
'''Auto Decancer | Renames those with "cancerous" names (non-ascii chars) to John Doe'''
return
@ConfigOpt(name='mod.autodehoist', accepts=bool, default=False, options=options)
async def auto_dehoist(self, value: bool):
'''Auto Dehoist | Renames those with "hoisted" names (starts with non a-z char) to John Doe'''
return
@ConfigOpt(name='utils.ranks', accepts=[discord.Role], default=[], options=options, premium=True)
async def ranks(self, value: list):
'''Ranks | Roles users can join via the ranks command'''
self._bot.logger.info(
f'$GREENSetting $CYANutils.ranks $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('utils.ranks', [str(r.id) for r in value])
@ConfigOpt(name='mod.autorole', accepts=discord.Role, default=None, options=options, premium=True)
async def auto_role(self, value: discord.Role):
'''Auto Role (Premium) | The role given to users upon joining the server'''
self._bot.logger.info(
f'$GREENSetting $CYANmod.autorole $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('mod.autorole', str(value.id))
    @ConfigOpt(name='mod.autorole.waitformsg', accepts=bool, default=False, options=options, premium=True)
    async def auto_role_wait_for_msg(self, value: bool):
        '''Auto Role - Wait for message | Waits for the user to send a message to give the auto role'''
        self._bot.logger.info(
            f'$GREENSetting $CYANmod.autorole.waitformsg $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
        await self.update('mod.autorole.waitformsg', value)
@ConfigOpt(name='commands.modonly', accepts=[discord.TextChannel], default=[], options=options)
async def mod_only(self, value: list):
'''Moderator Channels | The channels where only moderators can run commands'''
self._bot.logger.info(
f'$GREENSetting $CYANcommands.modonly $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('commands.modonly', [str(c.id) for c in value])
@ConfigOpt(name='commands.adminonly', accepts=[discord.TextChannel], default=[], options=options)
async def admin_only(self, value: list):
'''Admin channels | The channels where only admins can run commands'''
self._bot.logger.info(
f'$GREENSetting $CYANcommands.adminonly $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('commands.adminonly', [str(c.id) for c in value])
@ConfigOpt(name='greet.joinchannel', accepts=discord.TextChannel, default=None, options=options)
async def join_channel(self, value: discord.TextChannel):
'''Join Message Channel | The channel where join messages are sent'''
self._bot.logger.info(
f'$GREENSetting $CYANgreet.joinchannel $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('greet.joinchannel', str(value.id))
@ConfigOpt(name='greet.leavechannel', accepts=discord.TextChannel, default=None, options=options)
async def leave_channel(self, value: discord.TextChannel):
'''Leave Message Channel | The channel where leave messages are sent'''
self._bot.logger.info(
f'$GREENSetting $CYANgreet.leavechannel $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('greet.leavechannel', str(value.id))
@ConfigOpt(name='greet.joinmsg', accepts=str, default=None, options=options)
async def join_message(self, value: str):
'''Join Message | The server's custom join message'''
self._bot.logger.info(
f'$GREENSetting $CYANgreet.joinmsg $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('greet.joinmsg', value)
@ConfigOpt(name='greet.leavemsg', accepts=str, default=None, options=options)
async def leave_message(self, value: str):
'''Leave Message | The server's custom leave message'''
self._bot.logger.info(
f'$GREENSetting $CYANgreet.leavemsg $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('greet.leavemsg', value)
@ConfigOpt(name='disabled.commands', accepts=[str], default=[], options=options)
async def disabled_commands(self, value: list):
        '''Disabled Commands | Commands that can only be run by moderators (those with Manage Messages permission)'''
        # drop names that aren't actual commands (rebuilding the list avoids
        # mutating it while iterating over it)
        value = [v for v in value if self._bot.get_command(v)]
self._bot.logger.info(
f'$GREENSetting $CYANdisabled.commands $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('disabled.commands', value)
@ConfigOpt(name='utils.autoquote', accepts=bool, default=False, options=options)
async def auto_quote(self, value: bool):
'''Automatic Quotes | Automatically quotes messages when a message link is sent'''
self._bot.logger.info(
f'$GREENSetting $CYANutils.autoquote $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('utils.autoquote', value)
@ConfigOpt(name='utils.quotehooks', accepts=bool, default=True, options=options)
async def quote_hooks(self, value: bool):
'''Quote Webhooks | Whether or not to use webhooks for quoting/snipes'''
self._bot.logger.info(
f'$GREENSetting $CYANutils.quotehooks $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('utils.quotehooks', value)
@ConfigOpt(name='utils.badname', accepts=str, default=None, options=options)
async def bad_name(self, value: str):
'''Bad Name | The name used for decancer and dehoist. If not set, John Doe + discrim is used'''
return
@ConfigOpt(name='utils.public', accepts=bool, default=False, options=options)
async def public_guild(self, value: bool):
'''Public Guild | Makes your server viewable on https://fire.gaminggeek.space/discover (and joinable if a vanity url is set)'''
self._bot.logger.info(
f'$GREENSetting $CYANutils.public $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('utils.public', value)
@ConfigOpt(name='tickets.parent', accepts=discord.CategoryChannel, default=None, options=options)
async def ticket_parent(self, value: discord.CategoryChannel):
'''Tickets Category | The category where ticket channels are created. If this is not set, tickets are disabled'''
self._bot.logger.info(
f'$GREENSetting $CYANtickets.parent $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('tickets.parent', str(value.id))
@ConfigOpt(name='tickets.allow_override', accepts=bool, default=False, restricted=[755794954743185438], options=options)
async def ticket_override(self, value: bool):
'''Tickets Override | Allows the ticket category to be overriden each command run'''
self._bot.logger.info(
f'$GREENSetting $CYANtickets.allow_override $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('tickets.allow_override', value)
@ConfigOpt(name='tickets.transcript_logs', accepts=discord.TextChannel, default=None, restricted=[755794954743185438], options=options)
async def ticket_transcript_logs(self, value: discord.TextChannel):
'''Tickets Transcript Logs | A seperate channel for transcripts to be logged in'''
self._bot.logger.info(
f'$GREENSetting $CYANtickets.transcript_logs $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('tickets.transcript_logs', str(value.id))
@ConfigOpt(name='tickets.increment', accepts=int, default=0, options=options)
async def ticket_increment(self, value: int):
'''Ticket Increment | The number tickets will start incrementing from'''
await self.update('tickets.increment', value)
@ConfigOpt(name='tickets.limit', accepts=int, default=0, options=options)
async def ticket_limit(self, value: int):
'''Ticket Limit | The number tickets a user can open, 0 = Unlimited'''
self._bot.logger.info(
f'$GREENSetting $CYANtickets.limit $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('tickets.limit', value)
@ConfigOpt(name='tickets.name', accepts=str, default='ticket-{increment}', options=options)
async def ticket_name(self, value: str):
'''Ticket Name | The name used for ticket channels'''
self._bot.logger.info(
f'$GREENSetting $CYANtickets.name $GREENto $CYAN{value} $GREENfor guild $CYAN{self._guild}')
await self.update('tickets.name', value)
@ConfigOpt(name='tickets.channels', accepts=[discord.TextChannel], default=[], options=options, hidden=True)
async def ticket_channels(self, value: list):
'''Ticket Channels | All ticket channels in the guild'''
await self.update('tickets.channels', [str(v.id) for v in value if v])
def get(self, option):
if option not in self.options:
raise InvalidOptionError(option)
if self.options[option]['premium'] and self._guild.id not in self._bot.premium_guilds:
# Return default value if not premium :)
return self.options[option]['default']
if self.options[option]['restricted'] and self._guild.id not in self.options[option]['restricted']:
# Return default value if restricted :)
return self.options[option]['default']
if option not in self._data:
# Return default value if it's not even in the config :)
return self.options[option]['default']
accept = self.options[option]['accepts']
acceptlist = False
if isinstance(self._guild, discord.Guild):
converter = None
if isinstance(accept, list):
accept = accept[0]
acceptlist = True
if accept in DISCORD_CONVERTERS['bot']:
converter = getattr(
self._bot, DISCORD_CONVERTERS['bot'][accept])
elif accept in DISCORD_CONVERTERS['guild']:
converter = getattr(
self._guild, DISCORD_CONVERTERS['guild'][accept])
if converter and inspect.ismethod(converter):
if acceptlist:
return [converter(int(d)) for d in self._data[option] if d]
return converter(int(self._data[option]))
return self._data[option]
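    # Illustration of the conversion above (sketch): for an option declared with
    # accepts=discord.TextChannel, the stored value is the channel ID string;
    # assuming DISCORD_CONVERTERS maps discord.TextChannel to a guild lookup
    # such as get_channel, get('log.moderation') returns the resolved
    # discord.TextChannel object instead of the raw ID.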
async def set(self, opt: str, value):
if opt not in self.options:
raise InvalidOptionError(opt)
option = self.options[opt]
if value == option['default']: # Bypass all checks if default
await self.update(opt, value)
return self.get(opt)
if option['premium'] and self._guild.id not in self._bot.premium_guilds:
raise RestrictedOptionError(opt, 'premium guilds only')
if option['restricted'] and self._guild.id not in option['restricted']:
raise RestrictedOptionError(opt, 'select guilds only')
setter = option['setter']
if not inspect.isfunction(setter):
raise OptionConfigError(option)
if not isinstance(option['accepts'], list) and not isinstance(value, option['accepts']) and value is not None:
raise TypeMismatchError(
type=value.__class__.__name__, accepted=option['accepts'].__name__, option=opt)
if isinstance(option['accepts'], list):
accepts = option['accepts'][0]
if not isinstance(value, list) or any(not isinstance(v, accepts) for v in value):
if isinstance(value, list) and len(value) >= 1:
raise TypeMismatchError(type=[t.__class__.__name__ for t in value if not isinstance(
t, accepts)], accepted=[t.__name__ for t in option['accepts']], option=opt)
raise TypeMismatchError(
type=value.__class__.__name__, accepted=option['accepts'].__class__.__name__, option=opt)
await setter(self, value)
return self.get(opt)
async def update(self, option: str, value):
changed = False # Don't need to save if nothing changed lol
default = self.options[option]['default']
if value == default:
v = self._data.pop(option, None)
changed = True if v else False
elif self._data.get(option, None) != value:
self._data[option] = value
changed = True
if changed:
await self.save()
async def load(self):
if isinstance(self._guild, int):
self._guild = self._bot.get_guild(self._guild)
query = 'SELECT * FROM guildconfig WHERE gid=$1;'
conf = await self._db.fetch(query, str(self._guild.id))
if not conf:
self._data = await self.init()
self.loaded = True
return
else:
self._data = json.loads(conf[0]['data'])
items = [(k, v) for k, v in tuple(self._data.items())]
for opt, val in items:
if opt not in self.options:
self._data.pop(opt)
elif val == self.options[opt]['default']:
self._data.pop(opt)
self.loaded = True
async def save(self):
con = await self._db.acquire()
async with con.transaction():
query = 'UPDATE guildconfig SET data = $1 WHERE gid = $2;'
await self._db.execute(query, json.dumps(self._data), str(self._guild.id))
await self._db.release(con)
self._bot.logger.info(f'$GREENSaved config for $CYAN{self._guild}')
async def init(self):
con = await self._db.acquire()
async with con.transaction():
query = 'INSERT INTO guildconfig (\"gid\", \"data\") VALUES ($1, $2);'
await self._db.execute(query, str(self._guild.id), json.dumps({}))
await self._db.release(con)
self._bot.logger.info(f'$GREENInitiated config for $CYAN{self._guild}')
return {}
def __repr__(self):
return f'<Config guild={self._guild} loaded={self.loaded}>'
def __str__(self):
return f'<Config guild={self._guild} loaded={self.loaded}>'
| python | 19,736 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ....common._registration import register_converter
def convert_concat(scope, operator, container):
op_type = 'Concat'
attrs = {'name': operator.full_name}
if operator.raw_operator.concat.sequenceConcat:
attrs['axis'] = 0
else:
attrs['axis'] = 1
container.add_node(op_type, operator.input_full_names, operator.output_full_names, **attrs)
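# Note (sketch): CoreML's sequenceConcat flag concatenates along the sequence
# axis, which corresponds to ONNX axis 0; otherwise the concatenation runs
# along the channel axis, ONNX axis 1 for [N, C, ...] shaped tensors.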
register_converter('concat', convert_concat)
| python | 739 |
from Pipeline.models import models as md
import numpy as np
import pandas as pd
from itertools import product
import os
class HyperTunning(object):
def __init__(self, base_params, params_range, model_info, metrics, model_type='regressor'):
self._base_params = base_params
self._params_range = params_range
self._model_info = model_info
self._metrics = metrics
self._model_info_class_name = model_type.capitalize() + "Info"
self._mount_model_class_name = 'Mount' + model_type.capitalize()
self._iter_range = None
self._all_params_combinations = None
self._hyperparam_tunning_results = None
self._predictions = pd.DataFrame()
self._mount_all_combinations()
self._mount_metrics_info()
def _mount_all_combinations(self):
params_list = [self._params_range[key] for key in self._params_range.keys()]
combinations = list(product(*params_list))
columns = self._params_range.keys()
self._all_params_combinations = pd.DataFrame(data=combinations, columns=columns)
self._iter_range = iter(range(len(self._all_params_combinations)))
return None
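    # Illustration of the grid expansion above (hypothetical values):
    #   params_range = {'n_estimators': [100, 200], 'learning_rate': [0.1, 0.3]}
    # produces a 4-row DataFrame (one row per itertools.product combination)
    # with columns n_estimators and learning_rate.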
def _mount_base_dataframe(self, columns):
base_dataframe = pd.DataFrame(columns=columns)
return base_dataframe
def _mount_predictions_info(self, id_model, prediction):
self._predictions[f'{id_model}'] = prediction
return None
def _mount_metrics_info(self):
var_params = list(self._params_range.keys())
metrics = [metric.get('name') for metric in self._metrics]
self._hyperparam_tunning_results = self._mount_base_dataframe(['id'] + var_params + metrics)
for index, row in self._all_params_combinations.iterrows():
variable_parameters = dict(row)
params = dict(**variable_parameters, **self._base_params)
params['n_estimators'] = int(params['n_estimators'])
self._model_info['model']['params'] = params
model_info = eval(f'md.{self._model_info_class_name}(**self._model_info)')
model = eval(f'md.{self._mount_model_class_name}(model_info)')
data = dict()
data['id'] = next(self._iter_range)
for metric in self._metrics:
model.add_metric(metric)
data[metric['name']] = eval(f'model.{metric["name"]}')
data = dict(**data, **variable_parameters)
self._hyperparam_tunning_results =\
self._hyperparam_tunning_results.append(data, ignore_index=True)
self._mount_predictions_info(f'id - {data["id"]}', model.validation_data['prediction'])
# Include Resultado into predictions_info
self._mount_predictions_info('Resultado', model.validation_data['Resultado'])
# Convert id column type just to equal id column with each value of
# predictions dataframe.
self._hyperparam_tunning_results =\
self._hyperparam_tunning_results.astype({'id': 'int64'})
return None
def save(self, path, filename):
"""
Save files containing needed info to evaluate feature selection info. Two files
are saved, one containing relationships between groups of features and metrics and
another containing all predictions.
args:
-----
path (str) -> path to save feature selection files.
filename (str) -> name used for files.
"""
if not os.path.exists(path):
os.makedirs(path)
print(f'directory created {path}')
hyper_tunning_results_path = ''.join([path, 'metrics_', filename])
predictions_path = ''.join([path, 'predictions_', filename])
print(f'saving {hyper_tunning_results_path}...')
print(f'saving {predictions_path}...')
self._hyperparam_tunning_results.to_csv(hyper_tunning_results_path, sep=';', index=False)
self._predictions.to_csv(predictions_path, index=False)
@property
def predictions(self):
return self._predictions
@property
def hyperparam_tunning_results(self):
return self._hyperparam_tunning_results
@property
def all_params_combinations(self):
return self._all_params_combinations
| python | 4,361 |
from socket import socket, gethostbyname, AF_INET, SOCK_DGRAM
import sys
from directKeys import PressKey, ReleaseKey, W, A
import time
PORT_NUMBER = 5000
SIZE = 1024
hostName = gethostbyname( '192.168.1.125' )
mySocket = socket( AF_INET, SOCK_DGRAM )
mySocket.bind( (hostName, PORT_NUMBER) )
print("Test server listening on port {0}\n".format(PORT_NUMBER))
while True:
(data,addr) = mySocket.recvfrom(SIZE)
meme = data.decode('utf-8')
print(meme)
if meme == 'On':
PressKey(W)
time.sleep(1)
ReleaseKey(W)
else:
ReleaseKey(W)
        sys.exit()
| python | 660 |
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
from __future__ import print_function
from docplex.mp.utils import DOcplexException, resolve_pattern, is_int, is_string
from enum import Enum


def docplex_error_stop_here():  # pragma: no cover
    # INTERNAL: no-op hook referenced by error() and fatal() below;
    # set a breakpoint here to break whenever an error is reported.
    pass
##########################
# Error handling
class IErrorHandler(object):
def __init__(self):
pass # pragma: no cover
def info(self, msg, args=None):
pass # pragma: no cover
def warning(self, msg, args=None):
pass # pragma: no cover
def error(self, msg, args=None):
pass # pragma: no cover
def fatal(self, msg, args=None):
pass # pragma: no cover
def ok(self):
return False # pragma: no cover
def get_output_level(self):
return 0 # pragma: no cover
def set_output_level(self, new_level):
pass # pragma: no cover
def ensure(self, condition, msg, *args):
if not condition:
self.fatal(msg, args)
class InfoLevel(Enum):
""" Enumerated type for the possible output levels.
Info levels are sorted in increasing order of severity: `INFO`, `WARNING`, `ERROR`, `FATAL`.
Setting a level enables the printing of all messages from that severity and above.
Example:
Setting the level to `WARNING` enables the printing of `WARNING`, `ERROR`, and `FATAL` messages, but not
`INFO` level messages.
Setting the level to `FATAL` suppresses all messages, except for fatal errors.
"""
INFO, WARNING, ERROR, FATAL = 1, 10, 100, 9999999
@classmethod
def parse(cls, arg, default_level=INFO):
# INTERNAL
if not arg:
return default_level
elif isinstance(arg, cls):
return arg
elif is_string(arg):
return cls._name2level_map().get(arg.lower(), default_level)
elif is_int(arg):
if arg < 10:
# anything below 10 is INFO
return cls.INFO
elif arg < 100:
return cls.WARNING
elif arg < 1000:
return cls.ERROR
else:
# level fatal prints nothing except fatal errors
return cls.FATAL
else:
raise DOcplexException("Cannot convert this to InfoLevel: {0!r}".format(arg))
def __str__(self):
return self.name
@staticmethod
def _headers():
return {InfoLevel.FATAL: "FATAL",
InfoLevel.INFO: "*",
InfoLevel.WARNING: "Warning:",
InfoLevel.ERROR: "Error:"
}
@staticmethod
def _name2level_map():
return {"fatal": InfoLevel.FATAL,
"error": InfoLevel.ERROR,
"warning": InfoLevel.WARNING,
"info": InfoLevel.INFO}
def header(self):
# cannot put the dict in the class
        # as it will be interpreted as another enum value.
return self._headers().get(self, "???")
class AbstractErrorHandler(IErrorHandler):
TRACE_HEADER = "--"
def __init__(self, output_level=InfoLevel.INFO):
IErrorHandler.__init__(self)
self._trace_enabled = False
self._number_of_errors = 0
self._number_of_warnings = 0
self._number_of_fatals = 0
self._output_level = InfoLevel.INFO
self._is_print_suspended = False
self._postponed = []
self.set_output_level(output_level)
@property
def number_of_warnings(self):
""" Returns the number of warnings.
"""
return self._number_of_warnings
@property
def number_of_errors(self):
""" Returns the number of errors.
"""
return self._number_of_errors
@property
def number_of_fatals(self):
return self._number_of_fatals
def get_output_level(self):
return self._output_level
def set_output_level(self, output_level_arg):
output_level = InfoLevel.parse(output_level_arg)
if output_level != self._output_level:
self._output_level = output_level
def set_trace_mode(self, trace_mode):
self._trace_enabled = trace_mode
def enable_trace(self):
self.set_trace_mode(True)
def disable_trace(self):
self.set_trace_mode(False)
def is_trace_enabled(self):
return self._trace_enabled
def set_quiet(self):
""" Changes the output level to enable only error messages.
"""
self.set_output_level(InfoLevel.ERROR)
def reset(self):
self._number_of_errors = 0
self._number_of_warnings = 0
self._number_of_fatals = 0
def _internal_is_printed(self, level):
return self._output_level.value <= level.value
def _internal_print_if(self, level, msg, args):
if self._internal_is_printed(level):
self._internal_print(level, msg, args)
def _internal_print(self, level, msg, args):
# resolve message w/ args
header = level.header()
self._internal_print_header(header, msg, args)
def _internal_print_header(self, header, msg, args):
resolved_message = resolve_pattern(msg, args)
mline = '%s %s' % (header, resolved_message)
if self._is_print_suspended:
self._postponed.append(mline)
else:
print(mline)
def trace_header(self):
return self.TRACE_HEADER
def trace(self, msg, args=None):
if self.is_trace_enabled():
self._internal_print_header(self.trace_header(), msg, args)
def info(self, msg, args=None):
self._internal_print_if(InfoLevel.INFO, msg, args)
def warning(self, msg, args=None):
self._number_of_warnings += 1
self._internal_print_if(InfoLevel.WARNING, msg, args)
def error(self, msg, args=None):
docplex_error_stop_here()
self._number_of_errors += 1
self._internal_print_if(InfoLevel.ERROR, msg, args)
def fatal(self, msg, args=None):
self._number_of_fatals += 1
resolved_message = resolve_pattern(msg, args)
docplex_error_stop_here()
raise DOcplexException(resolved_message)
def ok(self):
""" Checks whether the handler has not recorded any error.
"""
return self._number_of_errors == 0 and self._number_of_fatals == 0
def prints_trace(self):
return self.is_trace_enabled()
def prints_info(self):
return self._internal_is_printed(InfoLevel.INFO)
def prints_warning(self):
return self._internal_is_printed(InfoLevel.WARNING)
def prints_error(self):
return self._internal_is_printed(InfoLevel.ERROR)
def suspend(self):
self._is_print_suspended = True
def flush(self):
self._is_print_suspended = False
for m in self._postponed:
print(m)
self._postponed = []
def docplex_error_stop_here():
# INTERNAL, use to set breakpoints
pass
def docplex_add_trivial_infeasible_ct(ct):
# INTERNAL: set breakpoint here to inspect ct
pass
def docplex_fatal(msg, *args):
resolved_message = resolve_pattern(msg, args)
docplex_error_stop_here()
raise DOcplexException(resolved_message)
class DefaultErrorHandler(AbstractErrorHandler):
""" The default error handler class.
"""
    def __init__(self, output_level=InfoLevel.INFO):
AbstractErrorHandler.__init__(self, output_level)
class SilentErrorHandler(AbstractErrorHandler):
    def __init__(self, output_level=InfoLevel.INFO):
AbstractErrorHandler.__init__(self, output_level)
def _internal_print(self, level, msg, args):
# nothing out, this is the point!
pass
def suspend(self):
pass
def flush(self):
pass
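if __name__ == '__main__':
    # Small self-check sketch (hedged; assumes resolve_pattern passes a
    # message with no placeholders through unchanged).
    handler = DefaultErrorHandler(InfoLevel.INFO)
    assert InfoLevel.parse('warning') is InfoLevel.WARNING
    assert InfoLevel.parse(5) is InfoLevel.INFO  # ints below 10 map to INFO
    handler.suspend()
    handler.warning('buffered until flush')  # counted, but not printed yet
    handler.flush()                          # now prints 'Warning: buffered until flush'
    assert handler.number_of_warnings == 1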
| python | 8,233 |
from collections import defaultdict
from typing import Dict
from mido import MidiFile
from dataclasses import dataclass
from map_midi_settings_mdbsl import *
from split_type0_midi import subdivide_midi_tracks
import os
dry_run = False
overwrite = False
new_file_location = 'Modified'
track_names = [
'dun_boss',
'dun_bossfloor',
'dun_forest_1',
'dun_forest_2',
'dun_forest',
'dun_grassy_1',
'dun_grassy_2',
'dun_grassy',
'dun_mount_1',
'dun_mount_2',
'dun_mount',
'dun_sea_1',
'dun_sea_2',
'dun_sea',
'endroll',
'ev_1',
'ev_2',
'ev_3',
'ev_4',
'ev_5',
'ev_ed',
'ev_fear',
'ev_op',
'gameclear',
'gameover',
'me_dunopen',
'me_evolution_e',
'me_evolution',
'me_exclude',
'me_item',
'me_join',
'me_lankup',
'me_lvup',
'me_reward',
'me_system',
'me_wave_m',
'me_wave_s',
'me_wind_m',
'me_wind_s',
'no_sound',
'sys_bazar',
'sys_clear',
'sys_map',
'sys_menu',
'sys_monster',
'sys_shop',
'sys_steal',
]
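# The second assignment below overrides the full list above with a small
# subset (the full list is kept for reference); drop it to convert every track.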
track_names = [
'dun_bossfloor',
'ev_fear',
]
percussion_transpose = -12
default_to_percussion = False
def get_mapping(mapping, key, inner_key):
if key not in mapping:
return None
value = mapping[key]
if isinstance(value, dict):
if inner_key in value:
return value[inner_key]
elif 'Default' in value:
return value['Default']
else:
return None
return value
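# For example, with mapping = {10: {'ev_fear': 20, 'Default': 30}, 11: 40}:
#   get_mapping(mapping, 10, 'ev_fear')  -> 20
#   get_mapping(mapping, 10, 'dun_boss') -> 30   (falls back to 'Default')
#   get_mapping(mapping, 11, 'anything') -> 40   (plain, non-dict value)
#   get_mapping(mapping, 12, 'anything') -> None (key missing)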
@dataclass
class Channel:
found_program: bool = False
current_program: int = None
current_mapped_program: int = None
has_non_percussion: bool = False
current_percussion: bool = default_to_percussion
found_note: bool = False
num_notes: int = 0
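    # The two attributes below have no type annotations, so they are plain
    # class attributes rather than dataclass fields; instance-level values
    # are assigned lazily in the conversion loop.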
first_volume = None
volume_changes = None
for track_name in track_names:
print('Converting', track_name)
unmapped_programs = set()
unmapped_percussion_notes = set()
file_name = '{}.mid'.format(track_name)
file_location = os.path.join(parts_folder, file_name)
new_file_name = file_name
channels: Dict[int, Channel] = defaultdict(Channel)
used_channels = set()
mid = MidiFile(file_location)
type0 = len(mid.tracks) == 1
for i, track in enumerate(mid.tracks):
remove_messages = []
for msg in track:
if msg.type == 'sysex':
remove_messages.append(msg)
if hasattr(msg, 'channel'):
used_channels.add(msg.channel)
channel = channels[msg.channel]
if channel.volume_changes is None:
channel.volume_changes = []
if msg.type == 'program_change':
channel.found_program = True
channel.current_program = msg.program
if msg.program in percussion_programs:
channel.current_percussion = True
msg.program = PERCUSSION
else:
channel.current_percussion = False
if msg.program in program_mapping:
mapped_program = get_mapping(program_mapping, msg.program, track_name)
if mapped_program is None:
remove_messages.append(msg)
else:
msg.program = mapped_program
else:
unmapped_programs.add(msg.program)
channel.current_mapped_program = msg.program
for msg in track:
if msg.type != 'program_change' and hasattr(msg, 'channel'):
channel = channels[msg.channel]
if channel.found_program and channel.current_mapped_program is None:
remove_messages.append(msg)
else:
if msg.type == 'note_on' or msg.type == 'note_off':
if msg.type == 'note_on':
channel.num_notes += 1
if channel.current_percussion:
note = msg.note + percussion_transpose
if note in percussion_parts:
mapped_note = percussion_parts[note]
found_note = False
if isinstance(mapped_note, int):
msg.note = percussion_parts[note]
found_note = True
elif mapped_note is None:
msg.velocity = 0
found_note = True
else:
if channel.current_program in mapped_note:
mapped_note = mapped_note[channel.current_program]
found_note = True
elif 'Default' in mapped_note:
mapped_note = mapped_note['Default']
found_note = True
if mapped_note is None:
msg.velocity = 0
elif found_note:
msg.note = mapped_note
if not found_note:
unmapped_percussion_notes.add(note)
else:
unmapped_percussion_notes.add(note)
else:
channel.has_non_percussion = True
current_transpose = get_mapping(program_transpose, channel.current_program, channel.current_mapped_program)
if current_transpose:
msg.note += current_transpose
if msg.type == 'control_change' and (msg.control == 7 or msg.control == 10):
channel.volume_changes.append(msg)
for msg in remove_messages:
track.remove(msg)
remap_percussion_channel = None
if channels[PERCUSSION_CHANNEL].has_non_percussion:
for i in range(0, 15):
if i not in channels:
remap_percussion_channel = i
print('Remapping channel', PERCUSSION_CHANNEL, 'to', remap_percussion_channel)
break
if remap_percussion_channel is None:
for i in range(0, 15):
if channels[i].current_percussion:
remap_percussion_channel = i
print('Remapping channel', PERCUSSION_CHANNEL, 'to', remap_percussion_channel)
break
if remap_percussion_channel is None:
print('No channel found to remap percussion.')
primary_percussion_channel: Channel = None
for channel in channels.values():
if channel.current_percussion and (not primary_percussion_channel or primary_percussion_channel.num_notes < channel.num_notes):
primary_percussion_channel = channel
for i, channel in channels.items():
if channel.current_percussion and channel is not primary_percussion_channel:
print('Removing volume changes for channel %d.' % i)
for msg in channel.volume_changes:
msg.control = 0
msg.value = 0
for msg in track:
if hasattr(msg, 'channel'):
channel = channels[msg.channel]
if msg.type == 'note_on':
channel.found_note = True
if msg.channel == PERCUSSION_CHANNEL and remap_percussion_channel is not None:
msg.channel = remap_percussion_channel
elif channel.current_percussion and (channel.found_note or not default_to_percussion):
msg.channel = PERCUSSION_CHANNEL
if len(unmapped_programs):
print('Encountered unmapped programs:', sorted(list(unmapped_programs)))
if len(unmapped_percussion_notes):
print('Encountered unmapped percussion notes:', sorted(list(unmapped_percussion_notes)))
if overwrite:
new_file_path = file_location
else:
sep_index = new_file_name.rfind(os.sep)
if sep_index >= 0:
new_file_name = new_file_name[sep_index + 1:]
new_file_path = os.path.join(new_file_location, new_file_name)
print('Saving file to', new_file_path)
if not dry_run:
mid.save(new_file_path)
if type0:
subdivide_midi_tracks(new_file_path).save(new_file_path)
| python | 7,456 |
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read functionality for FITS driver.
# Author: Even Rouault <even dot rouault @ mines-paris dot org>
#
###############################################################################
# Copyright (c) 2008, Even Rouault <even dot rouault @ mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from osgeo import gdal
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
#
def fits_init():
try:
gdaltest.fitsDriver = gdal.GetDriverByName('FITS')
except:
gdaltest.fitsDriver = None
if gdaltest.fitsDriver is None:
return 'skip'
return 'success'
###############################################################################
#
class TestFITS:
def __init__( self, fileName ):
self.fileName = fileName
def test( self ):
if gdaltest.fitsDriver is None:
return 'skip'
ds = gdal.Open('../gcore/data/' + self.fileName + '.tif')
gdaltest.fitsDriver.CreateCopy('tmp/' + self.fileName + '.fits', ds, options = [ 'PAGESIZE=2,2' ] )
ds2 = gdal.Open('tmp/' + self.fileName + '.fits')
if ds2.GetRasterBand(1).Checksum() != ds.GetRasterBand(1).Checksum():
return 'fail'
if ds2.GetRasterBand(1).DataType != ds.GetRasterBand(1).DataType:
return 'fail'
ds2 = None
gdaltest.fitsDriver.Delete('tmp/' + self.fileName + '.fits')
return 'success'
###############################################################################
#
def fits_metadata():
if gdaltest.fitsDriver is None:
return 'skip'
ds = gdal.Open('../gcore/data/byte.tif')
ds2 = gdaltest.fitsDriver.CreateCopy('tmp/byte.fits', ds )
md = { 'TEST' : 'test_value' }
ds2.SetMetadata(md)
ds2 = None
try:
os.unlink('tmp/byte.fits.aux.xml')
    except OSError:
pass
ds2 = gdal.Open('tmp/byte.fits')
md = ds2.GetMetadata()
ds2 = None
if md['TEST'] != 'test_value':
return 'fail'
ds2 = gdal.Open('tmp/byte.fits', gdal.GA_Update)
md = { 'TEST2' : 'test_value2' }
ds2.SetMetadata(md)
ds2 = None
try:
os.unlink('tmp/byte.fits.aux.xml')
    except OSError:
pass
ds2 = gdal.Open('tmp/byte.fits')
md = ds2.GetMetadata()
ds2 = None
if md['TEST2'] != 'test_value2':
return 'fail'
gdaltest.fitsDriver.Delete('tmp/byte.fits' )
return 'success'
###############################################################################
#
gdaltest_list = [ fits_init ]
fits_list = [ 'byte', 'int16', 'int32', 'float32', 'float64' ]
for item in fits_list:
ut = TestFITS( item )
gdaltest_list.append( (ut.test, item) )
gdaltest_list.append(fits_metadata)
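# The loop above registers one create/read roundtrip test per data type,
# e.g. (ut.test, 'byte'), (ut.test, 'int16'), ..., followed by fits_metadata.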
if __name__ == '__main__':
gdaltest.setup_run( 'fits' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| python | 4,178 |
from LibCharm.CodonUsageTable import CodonUsageTable
def test_codonusagetable_fraction():
assert CodonUsageTable('http://www.kazusa.or.jp/codon/cgi-bin/showcodon.cgi?species=83333&aa=1&style=N')
def test_codonusagetable_frequency():
assert CodonUsageTable('http://www.kazusa.or.jp/codon/cgi-bin/showcodon.cgi?species=83333&aa=1&style=N',
use_frequency=True) | python | 396 |
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
High-level abstraction of an EC2 order for servers
"""
import boto
import boto.ec2
from boto.mashups.server import Server, ServerSet
from boto.mashups.iobject import IObject
from boto.pyami.config import Config
from boto.sdb.persist import get_domain, set_domain
import time
from boto.compat import StringIO
InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge']
class Item(IObject):
def __init__(self):
self.region = None
self.name = None
self.instance_type = None
self.quantity = 0
self.zone = None
self.ami = None
self.groups = []
self.key = None
self.ec2 = None
        self.config = None
        self.userdata = {}  # assumed missing; set_userdata/get_userdata below rely on it
def set_userdata(self, key, value):
self.userdata[key] = value
def get_userdata(self, key):
return self.userdata[key]
def set_region(self, region=None):
if region:
self.region = region
else:
l = [(r, r.name, r.endpoint) for r in boto.ec2.regions()]
self.region = self.choose_from_list(l, prompt='Choose Region')
def set_name(self, name=None):
if name:
self.name = name
else:
self.name = self.get_string('Name')
def set_instance_type(self, instance_type=None):
if instance_type:
self.instance_type = instance_type
else:
self.instance_type = self.choose_from_list(InstanceTypes, 'Instance Type')
def set_quantity(self, n=0):
if n > 0:
self.quantity = n
else:
self.quantity = self.get_int('Quantity')
def set_zone(self, zone=None):
if zone:
self.zone = zone
else:
l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()]
self.zone = self.choose_from_list(l, prompt='Choose Availability Zone')
def set_ami(self, ami=None):
if ami:
self.ami = ami
else:
l = [(a, a.id, a.location) for a in self.ec2.get_all_images()]
self.ami = self.choose_from_list(l, prompt='Choose AMI')
def add_group(self, group=None):
if group:
self.groups.append(group)
else:
l = [(s, s.name, s.description) for s in self.ec2.get_all_security_groups()]
self.groups.append(self.choose_from_list(l, prompt='Choose Security Group'))
def set_key(self, key=None):
if key:
self.key = key
else:
l = [(k, k.name, '') for k in self.ec2.get_all_key_pairs()]
self.key = self.choose_from_list(l, prompt='Choose Keypair')
def update_config(self):
if not self.config.has_section('Credentials'):
self.config.add_section('Credentials')
self.config.set('Credentials', 'aws_access_key_id', self.ec2.aws_access_key_id)
self.config.set('Credentials', 'aws_secret_access_key', self.ec2.aws_secret_access_key)
if not self.config.has_section('Pyami'):
self.config.add_section('Pyami')
sdb_domain = get_domain()
if sdb_domain:
self.config.set('Pyami', 'server_sdb_domain', sdb_domain)
self.config.set('Pyami', 'server_sdb_name', self.name)
def set_config(self, config_path=None):
if not config_path:
config_path = self.get_filename('Specify Config file')
self.config = Config(path=config_path)
def get_userdata_string(self):
s = StringIO()
self.config.write(s)
return s.getvalue()
def enter(self, **params):
self.region = params.get('region', self.region)
if not self.region:
self.set_region()
self.ec2 = self.region.connect()
self.name = params.get('name', self.name)
if not self.name:
self.set_name()
self.instance_type = params.get('instance_type', self.instance_type)
if not self.instance_type:
self.set_instance_type()
self.zone = params.get('zone', self.zone)
if not self.zone:
self.set_zone()
self.quantity = params.get('quantity', self.quantity)
if not self.quantity:
self.set_quantity()
self.ami = params.get('ami', self.ami)
if not self.ami:
self.set_ami()
self.groups = params.get('groups', self.groups)
if not self.groups:
self.add_group()
self.key = params.get('key', self.key)
if not self.key:
self.set_key()
self.config = params.get('config', self.config)
if not self.config:
self.set_config()
self.update_config()
class Order(IObject):
def __init__(self):
self.items = []
self.reservation = None
def add_item(self, **params):
item = Item()
item.enter(**params)
self.items.append(item)
def display(self):
print('This Order consists of the following items')
print()
        print('QTY\tNAME\tTYPE\tAMI\t\tGroups\t\t\tKeyPair')
for item in self.items:
print('%s\t%s\t%s\t%s\t%s\t%s' % (item.quantity, item.name, item.instance_type,
item.ami.id, item.groups, item.key.name))
def place(self, block=True):
if get_domain() is None:
print('SDB Persistence Domain not set')
domain_name = self.get_string('Specify SDB Domain')
set_domain(domain_name)
s = ServerSet()
for item in self.items:
r = item.ami.run(min_count=1, max_count=item.quantity,
key_name=item.key.name, user_data=item.get_userdata_string(),
security_groups=item.groups, instance_type=item.instance_type,
placement=item.zone.name)
if block:
states = [i.state for i in r.instances]
if states.count('running') != len(states):
print(states)
time.sleep(15)
states = [i.update() for i in r.instances]
for i in r.instances:
server = Server()
server.name = item.name
server.instance_id = i.id
server.reservation = r
server.save()
s.append(server)
if len(s) == 1:
return s[0]
else:
return s
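# A hypothetical interactive session (any parameter omitted from add_item
# is prompted for via the IObject helpers):
#   order = Order()
#   order.add_item(quantity=2)           # prompts for region, name, AMI, ...
#   order.display()
#   servers = order.place(block=True)    # returns a Server, or a ServerSet if >1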
| python | 7,584 |
import pickle
import time
import pandas
import os
val2014 = [f for f in os.listdir(r"D:\ImageCap\image\val2014_resized")]
val2017 = [f for f in os.listdir(r"D:\ImageCap\image\val2017")]
print(len(val2014))
print(len(val2017))
print(val2014[0].replace('COCO_val2014_', ''))
print(val2017[0])
# quit()
# with open('train.annotations.pkl', 'rb') as f:
# data1 = pickle.load(f).to_dict('dict')
# # print(type(data))
# newData1 = []
# for i in range(0, len(data1['caption'])):
# temp = {}
# temp['caption'] = data1['caption'][i]
# temp['fileName'] = data1['file_name'][i]
# # print(data1['image_id'][i].item())
# # input()
# temp['imageId'] = data1['image_id'][i].item()
# newData1 += [temp]
# newData1 = sorted(newData1, key=lambda k: k['imageId'])
# print(len(newData1))
# with open('train.annotations2.pkl', 'rb') as f:
# data2 = pickle.load(f).to_dict('dict')
# # print(type(data))
# newData2 = []
# for i in range(0, len(data2['caption'])):
# temp = {}
# temp['caption'] = data2['caption'][i]
# temp['fileName'] = data2['file_name'][i]
# # print(data2['image_id'][i].item())
# # input()
# temp['imageId'] = data2['image_id'][i].item()
# newData2 += [temp]
# newData2 = sorted(newData2, key=lambda k: k['imageId'])
# print(len(newData2))
# # print(type(data2))
# with open('word_to_idx.pkl', 'rb') as f:
# data = pickle.load(f)
# print(type(data))
# print(len(data))
# with open('word_to_idx2.pkl', 'rb') as f:
# data = pickle.load(f)
# print(type(data))
# print(len(data))
# data1 = data1.tolist()
# data2 = data2.tolist()
count = 0
t = time.time()
for i in range(0, len(val2014)):
for j in range(0, len(val2017)):
# print(data1[i].replace('image/train2014_resized/COCO_train2014_', ''))
# print(data2[j].replace('image/train2017/', ''))
# input()
if (val2014[i].replace('COCO_val2014_', '') == val2017[j]):
# print("HERE")
val2017.remove(val2017[j])
count = count + 1
break
if (i % 5000 == 0):
print("Processed ", i, " images! Time taken: ", time.time() - t)
# # print(data1[0].replace('image/train2014_resized/COCO_train2014_', ''))
# # print(data2[1].replace('image/train2017/', ''))
print(count) | python | 2,342 |
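# Hedged alternative to the quadratic scan above: since filenames are
# compared for exact equality, a set intersection gives the same count
# (assuming no duplicate filenames within either directory listing).
def count_overlap(names_2014, names_2017):
    """Count val2014 files (prefix stripped) that also appear in val2017."""
    stripped = {name.replace('COCO_val2014_', '') for name in names_2014}
    return len(stripped.intersection(names_2017))
# e.g. count_overlap(val2014, val2017) on the original listings should equal
# the `count` printed above.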
# -*- coding: utf-8 -*-
'''
Manage users with the pw command on FreeBSD
'''
# Import python libs
try:
import grp
import pwd
except ImportError:
pass
import os
import logging
import copy
# Import salt libs
import salt.utils
from salt._compat import string_types
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'user'
def __virtual__():
'''
Set the user module if the kernel is FreeBSD
'''
return __virtualname__ if __grains__['kernel'] == 'FreeBSD' else False
def _get_gecos(name):
'''
Retrieve GECOS field info and return it in dictionary form
'''
gecos_field = pwd.getpwnam(name).pw_gecos.split(',', 3)
if not gecos_field:
return {}
else:
# Assign empty strings for any unspecified trailing GECOS fields
while len(gecos_field) < 4:
gecos_field.append('')
return {'fullname': str(gecos_field[0]),
'roomnumber': str(gecos_field[1]),
'workphone': str(gecos_field[2]),
'homephone': str(gecos_field[3])}
def _build_gecos(gecos_dict):
'''
Accepts a dictionary entry containing GECOS field names and their values,
and returns a full GECOS comment string, to be used with pw usermod.
'''
return '{0},{1},{2},{3}'.format(gecos_dict.get('fullname', ''),
gecos_dict.get('roomnumber', ''),
gecos_dict.get('workphone', ''),
gecos_dict.get('homephone', ''))
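# Round-trip example: for a user whose pw_gecos field is
#   'John Doe,101,555-0100,555-0199'
# _get_gecos returns {'fullname': 'John Doe', 'roomnumber': '101',
# 'workphone': '555-0100', 'homephone': '555-0199'}, and _build_gecos on that
# dict reproduces the original string.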
def add(name,
uid=None,
gid=None,
groups=None,
home=None,
shell=None,
unique=True,
fullname='',
roomnumber='',
workphone='',
homephone='',
createhome=True,
**kwargs):
'''
Add a user to the minion
CLI Example:
.. code-block:: bash
salt '*' user.add name <uid> <gid> <groups> <home> <shell>
'''
kwargs = salt.utils.clean_kwargs(**kwargs)
if salt.utils.is_true(kwargs.pop('system', False)):
log.warning('pw_user module does not support the \'system\' argument')
if kwargs:
log.warning('Invalid kwargs passed to user.add')
if isinstance(groups, string_types):
groups = groups.split(',')
cmd = 'pw useradd '
if uid:
cmd += '-u {0} '.format(uid)
if gid:
cmd += '-g {0} '.format(gid)
if groups:
cmd += '-G {0} '.format(','.join(groups))
if home is not None:
cmd += '-b {0} '.format(os.path.dirname(home))
if createhome is True:
cmd += '-m '
if shell:
cmd += '-s {0} '.format(shell)
if not salt.utils.is_true(unique):
cmd += '-o '
gecos_field = '{0},{1},{2},{3}'.format(fullname,
roomnumber,
workphone,
homephone)
cmd += '-c "{0}" '.format(gecos_field)
cmd += '-n {0}'.format(name)
ret = __salt__['cmd.run_all'](cmd)
return not ret['retcode']
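# For example (hedged), add('alice', uid=1001, groups=['wheel'], shell='/bin/sh')
# assembles roughly:
#   pw useradd -u 1001 -G wheel -m -s /bin/sh -c ",,," -n alice
# (-m comes from the createhome=True default; the empty GECOS fields collapse
# to ",,,").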
def delete(name, remove=False, force=False):
'''
Remove a user from the minion
CLI Example:
.. code-block:: bash
salt '*' user.delete name remove=True force=True
'''
if salt.utils.is_true(force):
log.error('pw userdel does not support force-deleting user while '
'user is logged in')
cmd = 'pw userdel '
if remove:
cmd += '-r '
cmd += '-n ' + name
ret = __salt__['cmd.run_all'](cmd)
return not ret['retcode']
def getent(refresh=False):
'''
Return the list of all info for all users
CLI Example:
.. code-block:: bash
salt '*' user.getent
'''
if 'user.getent' in __context__ and not refresh:
return __context__['user.getent']
ret = []
for data in pwd.getpwall():
ret.append(info(data.pw_name))
__context__['user.getent'] = ret
return ret
def chuid(name, uid):
'''
Change the uid for a named user
CLI Example:
.. code-block:: bash
salt '*' user.chuid foo 4376
'''
pre_info = info(name)
if uid == pre_info['uid']:
return True
cmd = 'pw usermod -u {0} -n {1}'.format(uid, name)
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['uid'] != pre_info['uid']:
return post_info['uid'] == uid
return False
def chgid(name, gid):
'''
Change the default group of the user
CLI Example:
.. code-block:: bash
salt '*' user.chgid foo 4376
'''
pre_info = info(name)
if gid == pre_info['gid']:
return True
cmd = 'pw usermod -g {0} -n {1}'.format(gid, name)
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['gid'] != pre_info['gid']:
return post_info['gid'] == gid
return False
def chshell(name, shell):
'''
Change the default shell of the user
CLI Example:
.. code-block:: bash
salt '*' user.chshell foo /bin/zsh
'''
pre_info = info(name)
if shell == pre_info['shell']:
return True
cmd = 'pw usermod -s {0} -n {1}'.format(shell, name)
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['shell'] != pre_info['shell']:
return post_info['shell'] == shell
return False
def chhome(name, home, persist=False):
'''
Change the home directory of the user, pass true for persist to copy files
to the new home dir
CLI Example:
.. code-block:: bash
salt '*' user.chhome foo /home/users/foo True
'''
pre_info = info(name)
if home == pre_info['home']:
return True
cmd = 'pw usermod {0} -d {1}'.format(name, home)
if persist:
cmd += ' -m '
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['home'] != pre_info['home']:
return post_info['home'] == home
return False
def chgroups(name, groups, append=False):
'''
Change the groups this user belongs to, add append to append the specified
groups
CLI Example:
.. code-block:: bash
salt '*' user.chgroups foo wheel,root True
'''
if isinstance(groups, string_types):
groups = groups.split(',')
ugrps = set(list_groups(name))
if ugrps == set(groups):
return True
if append:
groups += ugrps
cmd = 'pw usermod -G {0} -n {1}'.format(','.join(groups), name)
return not __salt__['cmd.retcode'](cmd)
def chfullname(name, fullname):
'''
Change the user's Full Name
CLI Example:
.. code-block:: bash
salt '*' user.chfullname foo "Foo Bar"
'''
fullname = str(fullname)
pre_info = _get_gecos(name)
if not pre_info:
return False
if fullname == pre_info['fullname']:
return True
gecos_field = copy.deepcopy(pre_info)
gecos_field['fullname'] = fullname
cmd = 'pw usermod {0} -c "{1}"'.format(name, _build_gecos(gecos_field))
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['fullname'] != pre_info['fullname']:
return post_info['fullname'] == fullname
return False
def chroomnumber(name, roomnumber):
'''
Change the user's Room Number
CLI Example:
.. code-block:: bash
salt '*' user.chroomnumber foo 123
'''
roomnumber = str(roomnumber)
pre_info = _get_gecos(name)
if not pre_info:
return False
if roomnumber == pre_info['roomnumber']:
return True
gecos_field = copy.deepcopy(pre_info)
gecos_field['roomnumber'] = roomnumber
cmd = 'pw usermod {0} -c "{1}"'.format(name, _build_gecos(gecos_field))
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['roomnumber'] != pre_info['roomnumber']:
return post_info['roomnumber'] == roomnumber
return False
def chworkphone(name, workphone):
'''
Change the user's Work Phone
CLI Example:
.. code-block:: bash
salt '*' user.chworkphone foo "7735550123"
'''
workphone = str(workphone)
pre_info = _get_gecos(name)
if not pre_info:
return False
if workphone == pre_info['workphone']:
return True
gecos_field = copy.deepcopy(pre_info)
gecos_field['workphone'] = workphone
cmd = 'pw usermod {0} -c "{1}"'.format(name, _build_gecos(gecos_field))
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['workphone'] != pre_info['workphone']:
return post_info['workphone'] == workphone
return False
def chhomephone(name, homephone):
'''
Change the user's Home Phone
CLI Example:
.. code-block:: bash
salt '*' user.chhomephone foo "7735551234"
'''
homephone = str(homephone)
pre_info = _get_gecos(name)
if not pre_info:
return False
if homephone == pre_info['homephone']:
return True
gecos_field = copy.deepcopy(pre_info)
gecos_field['homephone'] = homephone
cmd = 'pw usermod {0} -c "{1}"'.format(name, _build_gecos(gecos_field))
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['homephone'] != pre_info['homephone']:
return post_info['homephone'] == homephone
return False
def info(name):
'''
Return user information
CLI Example:
.. code-block:: bash
salt '*' user.info root
'''
ret = {}
try:
data = pwd.getpwnam(name)
ret['gid'] = data.pw_gid
ret['groups'] = list_groups(name)
ret['home'] = data.pw_dir
ret['name'] = data.pw_name
ret['passwd'] = data.pw_passwd
ret['shell'] = data.pw_shell
ret['uid'] = data.pw_uid
# Put GECOS info into a list
gecos_field = data.pw_gecos.split(',', 3)
# Assign empty strings for any unspecified GECOS fields
while len(gecos_field) < 4:
gecos_field.append('')
ret['fullname'] = gecos_field[0]
ret['roomnumber'] = gecos_field[1]
ret['workphone'] = gecos_field[2]
ret['homephone'] = gecos_field[3]
except KeyError:
return {}
return ret
def list_groups(name):
'''
Return a list of groups the named user belongs to
CLI Example:
.. code-block:: bash
salt '*' user.list_groups foo
'''
ugrp = set()
# Add the primary user's group
try:
ugrp.add(grp.getgrgid(pwd.getpwnam(name).pw_gid).gr_name)
except KeyError:
# The user's applied default group is undefined on the system, so
# it does not exist
pass
# If we already grabbed the group list, it's overkill to grab it again
if 'user.getgrall' in __context__:
groups = __context__['user.getgrall']
else:
groups = grp.getgrall()
__context__['user.getgrall'] = groups
# Now, all other groups the user belongs to
for group in groups:
if name in group.gr_mem:
ugrp.add(group.gr_name)
return sorted(list(ugrp))
| python | 11,015 |
import abc
import functools
import os
import time
import bpemb
import torch
import torchtext
from seq2struct.resources import corenlp
from seq2struct.utils import registry
class Embedder(metaclass=abc.ABCMeta):
@abc.abstractmethod
def tokenize(self, sentence):
'''Given a string, return a list of tokens suitable for lookup.'''
pass
@abc.abstractmethod
def untokenize(self, tokens):
'''Undo tokenize.'''
pass
@abc.abstractmethod
def lookup(self, token):
'''Given a token, return a vector embedding if token is in vocabulary.
If token is not in the vocabulary, then return None.'''
pass
@abc.abstractmethod
def contains(self, token):
pass
@abc.abstractmethod
def to(self, device):
'''Transfer the pretrained embeddings to the given device.'''
pass
@registry.register('word_emb', 'glove')
class GloVe(Embedder):
def __init__(self, kind):
cache = os.path.join(os.environ.get('CACHE_DIR', os.getcwd()), '.vector_cache')
self.glove = torchtext.vocab.GloVe(name=kind, cache=cache)
self.dim = self.glove.dim
self.vectors = self.glove.vectors
@functools.lru_cache(maxsize=1024)
def tokenize(self, text):
ann = corenlp.annotate(text, annotators=['tokenize', 'ssplit'])
return [tok.word.lower() for sent in ann.sentence for tok in sent.token]
def untokenize(self, tokens):
return ' '.join(tokens)
def lookup(self, token):
i = self.glove.stoi.get(token)
if i is None:
return None
return self.vectors[i]
def contains(self, token):
return token in self.glove.stoi
def to(self, device):
self.vectors = self.vectors.to(device)
@registry.register('word_emb', 'bpemb')
class BPEmb(Embedder):
def __init__(self, dim, vocab_size, lang='en'):
self.bpemb = bpemb.BPEmb(lang=lang, dim=dim, vs=vocab_size)
self.dim = dim
self.vectors = torch.from_numpy(self.bpemb.vectors)
def tokenize(self, text):
return self.bpemb.encode(text)
def untokenize(self, tokens):
return self.bpemb.decode(tokens)
def lookup(self, token):
i = self.bpemb.spm.PieceToId(token)
if i == self.bpemb.spm.unk_id():
return None
return self.vectors[i]
def contains(self, token):
return self.lookup(token) is not None
def to(self, device):
self.vectors = self.vectors.to(device)
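if __name__ == '__main__':
    # Hedged smoke test: bpemb downloads small pretrained subword embeddings
    # on first use, so this assumes network access.
    emb = BPEmb(dim=50, vocab_size=10000)
    tokens = emb.tokenize('select name from singer')
    print(tokens)                                # subword pieces, e.g. ['▁select', ...]
    vec = emb.lookup(tokens[0])
    print(None if vec is None else vec.shape)    # torch.Size([50]) when in-vocabulary
    print(emb.untokenize(tokens))                # 'select name from singer'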
| python | 2,537 |
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
import tempfile
import logging
import shutil
# Import 3rd-party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves.urllib.error import URLError
from salt.ext.six.moves.urllib.request import urlopen
# pylint: enable=import-error,no-name-in-module,redefined-builtin
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.paths import FILES, TMP
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import requires_network, skip_if_binaries_missing
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.modules.zcbuildout as buildout
import salt.modules.cmdmod as cmd
ROOT = os.path.join(FILES, 'file', 'base', 'buildout')
KNOWN_VIRTUALENV_BINARY_NAMES = (
'virtualenv',
'virtualenv2',
'virtualenv-2.6',
'virtualenv-2.7'
)
BOOT_INIT = {
1: [
'var/ver/1/bootstrap/bootstrap.py',
],
2: [
'var/ver/2/bootstrap/bootstrap.py',
'b/bootstrap.py',
]}
log = logging.getLogger(__name__)
def download_to(url, dest):
    with salt.utils.files.fopen(dest, 'wb') as fic:  # binary mode: urlopen().read() returns bytes on Py3
fic.write(urlopen(url, timeout=10).read())
@skipIf(True, 'These tests are not running reliably')
class Base(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {
buildout: {
'__salt__': {
'cmd.run_all': cmd.run_all,
'cmd.run': cmd.run,
'cmd.retcode': cmd.retcode,
}
}
}
@classmethod
def setUpClass(cls):
if not os.path.isdir(TMP):
os.makedirs(TMP)
cls.rdir = tempfile.mkdtemp(dir=TMP)
cls.tdir = os.path.join(cls.rdir, 'test')
for idx, url in six.iteritems(buildout._URL_VERSIONS):
log.debug('Downloading bootstrap from {0}'.format(url))
dest = os.path.join(
cls.rdir, '{0}_bootstrap.py'.format(idx)
)
try:
download_to(url, dest)
except URLError:
log.debug('Failed to download {0}'.format(url))
# creating a new setuptools install
cls.ppy_st = os.path.join(cls.rdir, 'psetuptools')
cls.py_st = os.path.join(cls.ppy_st, 'bin', 'python')
ret1 = buildout._Popen((
'{0} --no-site-packages {1};'
'{1}/bin/pip install -U setuptools; '
'{1}/bin/easy_install -U distribute;').format(
salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES),
cls.ppy_st
)
)
assert ret1['retcode'] == 0
@classmethod
def tearDownClass(cls):
if os.path.isdir(cls.rdir):
shutil.rmtree(cls.rdir)
def setUp(self):
super(Base, self).setUp()
self._remove_dir()
shutil.copytree(ROOT, self.tdir)
for idx in BOOT_INIT:
path = os.path.join(
self.rdir, '{0}_bootstrap.py'.format(idx)
)
for fname in BOOT_INIT[idx]:
shutil.copy2(path, os.path.join(self.tdir, fname))
def tearDown(self):
super(Base, self).tearDown()
self._remove_dir()
def _remove_dir(self):
if os.path.isdir(self.tdir):
shutil.rmtree(self.tdir)
@skipIf(True, 'These tests are not running reliably')
@skipIf(salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES) is None,
'The \'virtualenv\' packaged needs to be installed')
@skip_if_binaries_missing(['tar'])
class BuildoutTestCase(Base):
@requires_network()
def test_onlyif_unless(self):
b_dir = os.path.join(self.tdir, 'b')
ret = buildout.buildout(b_dir, onlyif='/bin/false')
self.assertTrue(ret['comment'] == 'onlyif condition is false')
self.assertTrue(ret['status'] is True)
ret = buildout.buildout(b_dir, unless='/bin/true')
self.assertTrue(ret['comment'] == 'unless condition is true')
self.assertTrue(ret['status'] is True)
@requires_network()
def test_salt_callback(self):
@buildout._salt_callback
def callback1(a, b=1):
for i in buildout.LOG.levels:
getattr(buildout.LOG, i)('{0}bar'.format(i[0]))
return 'foo'
def callback2(a, b=1):
raise Exception('foo')
# pylint: disable=invalid-sequence-index
ret1 = callback1(1, b=3)
# These lines are throwing pylint errors - disabling for now since we are skipping
# these tests
#self.assertEqual(ret1['status'], True)
#self.assertEqual(ret1['logs_by_level']['warn'], ['wbar'])
#self.assertEqual(ret1['comment'], '')
# These lines are throwing pylint errors - disabling for now since we are skipping
# these tests
#self.assertTrue(
# u''
# u'OUTPUT:\n'
# u'foo\n'
# u''
# in ret1['outlog']
#)
# These lines are throwing pylint errors - disabling for now since we are skipping
# these tests
#self.assertTrue(u'Log summary:\n' in ret1['outlog'])
# These lines are throwing pylint errors - disabling for now since we are skipping
# these tests
# self.assertTrue(
# u'INFO: ibar\n'
# u'WARN: wbar\n'
# u'DEBUG: dbar\n'
# u'ERROR: ebar\n'
# in ret1['outlog']
#)
# These lines are throwing pylint errors - disabling for now since we are skipping
# these tests
#self.assertTrue('by level' in ret1['outlog_by_level'])
#self.assertEqual(ret1['out'], 'foo')
ret2 = buildout._salt_callback(callback2)(2, b=6)
self.assertEqual(ret2['status'], False)
self.assertTrue(
ret2['logs_by_level']['error'][0].startswith('Traceback'))
self.assertTrue(
'We did not get any '
'expectable answer '
'from buildout' in ret2['comment'])
self.assertEqual(ret2['out'], None)
for l in buildout.LOG.levels:
self.assertTrue(0 == len(buildout.LOG.by_level[l]))
# pylint: enable=invalid-sequence-index
@requires_network()
def test_get_bootstrap_url(self):
for path in [os.path.join(self.tdir, 'var/ver/1/dumppicked'),
os.path.join(self.tdir, 'var/ver/1/bootstrap'),
os.path.join(self.tdir, 'var/ver/1/versions')]:
self.assertEqual(buildout._URL_VERSIONS[1],
buildout._get_bootstrap_url(path),
"b1 url for {0}".format(path))
for path in [
os.path.join(self.tdir, '/non/existing'),
os.path.join(self.tdir, 'var/ver/2/versions'),
os.path.join(self.tdir, 'var/ver/2/bootstrap'),
os.path.join(self.tdir, 'var/ver/2/default'),
]:
self.assertEqual(buildout._URL_VERSIONS[2],
buildout._get_bootstrap_url(path),
"b2 url for {0}".format(path))
@requires_network()
def test_get_buildout_ver(self):
for path in [os.path.join(self.tdir, 'var/ver/1/dumppicked'),
os.path.join(self.tdir, 'var/ver/1/bootstrap'),
os.path.join(self.tdir, 'var/ver/1/versions')]:
self.assertEqual(1,
buildout._get_buildout_ver(path),
"1 for {0}".format(path))
for path in [os.path.join(self.tdir, '/non/existing'),
os.path.join(self.tdir, 'var/ver/2/versions'),
os.path.join(self.tdir, 'var/ver/2/bootstrap'),
os.path.join(self.tdir, 'var/ver/2/default')]:
self.assertEqual(2,
buildout._get_buildout_ver(path),
"2 for {0}".format(path))
@requires_network()
def test_get_bootstrap_content(self):
self.assertEqual(
'',
buildout._get_bootstrap_content(
os.path.join(self.tdir, '/non/existing'))
)
self.assertEqual(
'',
buildout._get_bootstrap_content(
os.path.join(self.tdir, 'var/tb/1')))
self.assertEqual(
'foo\n',
buildout._get_bootstrap_content(
os.path.join(self.tdir, 'var/tb/2')))
@requires_network()
def test_logger_clean(self):
buildout.LOG.clear()
# nothing in there
self.assertTrue(
True not in
[len(buildout.LOG.by_level[a]) > 0
for a in buildout.LOG.by_level])
buildout.LOG.info('foo')
self.assertTrue(
True in
[len(buildout.LOG.by_level[a]) > 0
for a in buildout.LOG.by_level])
buildout.LOG.clear()
self.assertTrue(
True not in
[len(buildout.LOG.by_level[a]) > 0
for a in buildout.LOG.by_level])
@requires_network()
def test_logger_loggers(self):
buildout.LOG.clear()
# nothing in there
for i in buildout.LOG.levels:
getattr(buildout.LOG, i)('foo')
getattr(buildout.LOG, i)('bar')
getattr(buildout.LOG, i)('moo')
self.assertTrue(len(buildout.LOG.by_level[i]) == 3)
self.assertEqual(buildout.LOG.by_level[i][0], 'foo')
self.assertEqual(buildout.LOG.by_level[i][-1], 'moo')
@requires_network()
def test__find_cfgs(self):
result = sorted(
[a.replace(ROOT, '') for a in buildout._find_cfgs(ROOT)])
assertlist = sorted(
['/buildout.cfg',
'/c/buildout.cfg',
'/etc/buildout.cfg',
'/e/buildout.cfg',
'/b/buildout.cfg',
'/b/bdistribute/buildout.cfg',
'/b/b2/buildout.cfg',
'/foo/buildout.cfg'])
self.assertEqual(result, assertlist)
@requires_network()
def skip_test_upgrade_bootstrap(self):
b_dir = os.path.join(self.tdir, 'b')
bpy = os.path.join(b_dir, 'bootstrap.py')
buildout.upgrade_bootstrap(b_dir)
time1 = os.stat(bpy).st_mtime
with salt.utils.files.fopen(bpy) as fic:
data = fic.read()
self.assertTrue('setdefaulttimeout(2)' in data)
flag = os.path.join(b_dir, '.buildout', '2.updated_bootstrap')
self.assertTrue(os.path.exists(flag))
buildout.upgrade_bootstrap(b_dir, buildout_ver=1)
time2 = os.stat(bpy).st_mtime
with salt.utils.files.fopen(bpy) as fic:
data = fic.read()
self.assertTrue('setdefaulttimeout(2)' in data)
flag = os.path.join(b_dir, '.buildout', '1.updated_bootstrap')
self.assertTrue(os.path.exists(flag))
buildout.upgrade_bootstrap(b_dir, buildout_ver=1)
time3 = os.stat(bpy).st_mtime
self.assertNotEqual(time2, time1)
self.assertEqual(time2, time3)
@skipIf(salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES) is None,
'The \'virtualenv\' packaged needs to be installed')
@skipIf(True, 'These tests are not running reliably')
class BuildoutOnlineTestCase(Base):
@classmethod
def setUpClass(cls):
super(BuildoutOnlineTestCase, cls).setUpClass()
cls.ppy_dis = os.path.join(cls.rdir, 'pdistibute')
cls.ppy_blank = os.path.join(cls.rdir, 'pblank')
cls.py_dis = os.path.join(cls.ppy_dis, 'bin', 'python')
cls.py_blank = os.path.join(cls.ppy_blank, 'bin', 'python')
# creating a distribute based install
try:
ret20 = buildout._Popen((
'{0} --no-site-packages --no-setuptools --no-pip {1}'.format(
salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES),
cls.ppy_dis
)
))
except buildout._BuildoutError:
ret20 = buildout._Popen((
'{0} --no-site-packages {1}'.format(
salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES),
cls.ppy_dis
))
)
assert ret20['retcode'] == 0
download_to('https://pypi.python.org/packages/source'
'/d/distribute/distribute-0.6.43.tar.gz',
os.path.join(cls.ppy_dis, 'distribute-0.6.43.tar.gz'))
ret2 = buildout._Popen((
'cd {0} &&'
' tar xzvf distribute-0.6.43.tar.gz && cd distribute-0.6.43 &&'
' {0}/bin/python setup.py install'
).format(cls.ppy_dis))
assert ret2['retcode'] == 0
# creating a blank based install
try:
ret3 = buildout._Popen((
'{0} --no-site-packages --no-setuptools --no-pip {1}'.format(
salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES),
cls.ppy_blank
)
))
except buildout._BuildoutError:
ret3 = buildout._Popen((
'{0} --no-site-packages {1}'.format(
salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES),
cls.ppy_blank
)
))
assert ret3['retcode'] == 0
@requires_network()
def test_buildout_bootstrap(self):
b_dir = os.path.join(self.tdir, 'b')
bd_dir = os.path.join(self.tdir, 'b', 'bdistribute')
b2_dir = os.path.join(self.tdir, 'b', 'b2')
self.assertTrue(buildout._has_old_distribute(self.py_dis))
# this is too hard to check as on debian & other where old
# packages are present (virtualenv), we can't have
# a clean site-packages
# self.assertFalse(buildout._has_old_distribute(self.py_blank))
self.assertFalse(buildout._has_old_distribute(self.py_st))
self.assertFalse(buildout._has_setuptools7(self.py_dis))
self.assertTrue(buildout._has_setuptools7(self.py_st))
self.assertFalse(buildout._has_setuptools7(self.py_blank))
ret = buildout.bootstrap(
bd_dir, buildout_ver=1, python=self.py_dis)
comment = ret['outlog']
self.assertTrue('--distribute' in comment)
self.assertTrue('Generated script' in comment)
ret = buildout.bootstrap(b_dir, buildout_ver=1, python=self.py_blank)
comment = ret['outlog']
# as we may have old packages, this test the two
# behaviors (failure with old setuptools/distribute)
self.assertTrue(
('Got ' in comment
and 'Generated script' in comment)
or ('setuptools>=0.7' in comment)
)
ret = buildout.bootstrap(b_dir, buildout_ver=2, python=self.py_blank)
comment = ret['outlog']
self.assertTrue(
('setuptools' in comment
and 'Generated script' in comment)
or ('setuptools>=0.7' in comment)
)
ret = buildout.bootstrap(b_dir, buildout_ver=2, python=self.py_st)
comment = ret['outlog']
self.assertTrue(
('setuptools' in comment
and 'Generated script' in comment)
or ('setuptools>=0.7' in comment)
)
ret = buildout.bootstrap(b2_dir, buildout_ver=2, python=self.py_st)
comment = ret['outlog']
self.assertTrue(
('setuptools' in comment
and 'Creating directory' in comment)
or ('setuptools>=0.7' in comment)
)
@requires_network()
def test_run_buildout(self):
b_dir = os.path.join(self.tdir, 'b')
ret = buildout.bootstrap(b_dir, buildout_ver=2, python=self.py_st)
self.assertTrue(ret['status'])
ret = buildout.run_buildout(b_dir,
parts=['a', 'b'])
out = ret['out']
self.assertTrue('Installing a' in out)
self.assertTrue('Installing b' in out)
@requires_network()
def test_buildout(self):
b_dir = os.path.join(self.tdir, 'b')
ret = buildout.buildout(b_dir, buildout_ver=2, python=self.py_st)
self.assertTrue(ret['status'])
out = ret['out']
comment = ret['comment']
self.assertTrue(ret['status'])
self.assertTrue('Creating directory' in out)
self.assertTrue('Installing a.' in out)
self.assertTrue('psetuptools/bin/python bootstrap.py' in comment)
self.assertTrue('buildout -c buildout.cfg' in comment)
ret = buildout.buildout(b_dir,
parts=['a', 'b', 'c'],
buildout_ver=2,
python=self.py_st)
outlog = ret['outlog']
out = ret['out']
comment = ret['comment']
self.assertTrue('Installing single part: a' in outlog)
self.assertTrue('buildout -c buildout.cfg -N install a' in comment)
self.assertTrue('Installing b.' in out)
self.assertTrue('Installing c.' in out)
ret = buildout.buildout(b_dir,
parts=['a', 'b', 'c'],
buildout_ver=2,
newest=True,
python=self.py_st)
outlog = ret['outlog']
out = ret['out']
comment = ret['comment']
self.assertTrue('buildout -c buildout.cfg -n install a' in comment)
@skipIf(True, 'These tests are not running reliably')
class BuildoutAPITestCase(TestCase):
def test_merge(self):
buildout.LOG.clear()
buildout.LOG.info('àé')
buildout.LOG.info(u'àé')
buildout.LOG.error('àé')
buildout.LOG.error(u'àé')
ret1 = buildout._set_status({}, out='éà')
uret1 = buildout._set_status({}, out=u'éà')
buildout.LOG.clear()
buildout.LOG.info('ççàé')
buildout.LOG.info(u'ççàé')
buildout.LOG.error('ççàé')
buildout.LOG.error(u'ççàé')
ret2 = buildout._set_status({}, out='çéà')
uret2 = buildout._set_status({}, out=u'çéà')
uretm = buildout._merge_statuses([ret1, uret1, ret2, uret2])
for ret in ret1, uret1, ret2, uret2:
out = ret['out']
if not isinstance(ret['out'], six.text_type):
out = ret['out'].decode('utf-8')
for out in ['àé', 'ççàé']:
self.assertTrue(out in uretm['logs_by_level']['info'])
self.assertTrue(out in uretm['outlog_by_level'])
def test_setup(self):
buildout.LOG.clear()
buildout.LOG.info('àé')
buildout.LOG.info(u'àé')
buildout.LOG.error('àé')
buildout.LOG.error(u'àé')
ret = buildout._set_status({}, out='éà')
uret = buildout._set_status({}, out=u'éà')
self.assertTrue(ret['outlog'] == uret['outlog'])
self.assertTrue('àé' in uret['outlog_by_level'])
| python | 19,102 |
from flask import request
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField, FileField
from wtforms.validators import ValidationError, DataRequired, Length
from flask_babel import _, lazy_gettext as _l
from app.models import User
from flask_wtf.file import FileAllowed
class EditProfileForm(FlaskForm):
username = StringField(_l('username'), validators=[DataRequired()])
about_me = TextAreaField(_l('about me'),
validators=[Length(min=0, max=140)])
submit = SubmitField(_l('submit'))
def __init__(self, original_username, *args, **kwargs):
super(EditProfileForm, self).__init__(*args, **kwargs)
self.original_username = original_username
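    # WTForms automatically runs any ``validate_<fieldname>`` method as an
    # inline validator for that field during form validation.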
def validate_username(self, username):
        if username.data != self.original_username:
            user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError(_('please use a different username.'))
class EmptyForm(FlaskForm):
submit = SubmitField('submit')
class PostForm(FlaskForm):
title = TextAreaField(_l(''), validators=[DataRequired(),
Length(min=1, max=25, message=(u'title can only be up to 25 characters'))])
post = TextAreaField(_l(''), validators=[DataRequired(),
Length(min=1, max=500, message=(u'the description can only be up to 500 characters'))])
filename = FileField('')
submit = SubmitField(_l('grape it \U0001F347'))
| python | 1,497 |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ResponderRecipeResponderRule(object):
"""
Details of ResponderRule.
"""
#: A constant which can be used with the type property of a ResponderRecipeResponderRule.
#: This constant has a value of "REMEDIATION"
TYPE_REMEDIATION = "REMEDIATION"
#: A constant which can be used with the type property of a ResponderRecipeResponderRule.
#: This constant has a value of "NOTIFICATION"
TYPE_NOTIFICATION = "NOTIFICATION"
#: A constant which can be used with the supported_modes property of a ResponderRecipeResponderRule.
#: This constant has a value of "AUTOACTION"
SUPPORTED_MODES_AUTOACTION = "AUTOACTION"
#: A constant which can be used with the supported_modes property of a ResponderRecipeResponderRule.
#: This constant has a value of "USERACTION"
SUPPORTED_MODES_USERACTION = "USERACTION"
#: A constant which can be used with the lifecycle_state property of a ResponderRecipeResponderRule.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a ResponderRecipeResponderRule.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a ResponderRecipeResponderRule.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a ResponderRecipeResponderRule.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
#: A constant which can be used with the lifecycle_state property of a ResponderRecipeResponderRule.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a ResponderRecipeResponderRule.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a ResponderRecipeResponderRule.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
def __init__(self, **kwargs):
"""
Initializes a new ResponderRecipeResponderRule object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param responder_rule_id:
The value to assign to the responder_rule_id property of this ResponderRecipeResponderRule.
:type responder_rule_id: str
:param display_name:
The value to assign to the display_name property of this ResponderRecipeResponderRule.
:type display_name: str
:param description:
The value to assign to the description property of this ResponderRecipeResponderRule.
:type description: str
:param type:
The value to assign to the type property of this ResponderRecipeResponderRule.
Allowed values for this property are: "REMEDIATION", "NOTIFICATION", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type type: str
:param policies:
The value to assign to the policies property of this ResponderRecipeResponderRule.
:type policies: list[str]
:param supported_modes:
The value to assign to the supported_modes property of this ResponderRecipeResponderRule.
Allowed values for items in this list are: "AUTOACTION", "USERACTION", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type supported_modes: list[str]
:param details:
The value to assign to the details property of this ResponderRecipeResponderRule.
:type details: oci.cloud_guard.models.ResponderRuleDetails
:param compartment_id:
The value to assign to the compartment_id property of this ResponderRecipeResponderRule.
:type compartment_id: str
:param time_created:
The value to assign to the time_created property of this ResponderRecipeResponderRule.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this ResponderRecipeResponderRule.
:type time_updated: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this ResponderRecipeResponderRule.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param lifecycle_details:
The value to assign to the lifecycle_details property of this ResponderRecipeResponderRule.
:type lifecycle_details: str
"""
self.swagger_types = {
'responder_rule_id': 'str',
'display_name': 'str',
'description': 'str',
'type': 'str',
'policies': 'list[str]',
'supported_modes': 'list[str]',
'details': 'ResponderRuleDetails',
'compartment_id': 'str',
'time_created': 'datetime',
'time_updated': 'datetime',
'lifecycle_state': 'str',
'lifecycle_details': 'str'
}
self.attribute_map = {
'responder_rule_id': 'responderRuleId',
'display_name': 'displayName',
'description': 'description',
'type': 'type',
'policies': 'policies',
'supported_modes': 'supportedModes',
'details': 'details',
'compartment_id': 'compartmentId',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'lifecycle_state': 'lifecycleState',
'lifecycle_details': 'lifecycleDetails'
}
self._responder_rule_id = None
self._display_name = None
self._description = None
self._type = None
self._policies = None
self._supported_modes = None
self._details = None
self._compartment_id = None
self._time_created = None
self._time_updated = None
self._lifecycle_state = None
self._lifecycle_details = None
@property
def responder_rule_id(self):
"""
**[Required]** Gets the responder_rule_id of this ResponderRecipeResponderRule.
Identifier for ResponderRule.
:return: The responder_rule_id of this ResponderRecipeResponderRule.
:rtype: str
"""
return self._responder_rule_id
@responder_rule_id.setter
def responder_rule_id(self, responder_rule_id):
"""
Sets the responder_rule_id of this ResponderRecipeResponderRule.
Identifier for ResponderRule.
:param responder_rule_id: The responder_rule_id of this ResponderRecipeResponderRule.
:type: str
"""
self._responder_rule_id = responder_rule_id
@property
def display_name(self):
"""
Gets the display_name of this ResponderRecipeResponderRule.
ResponderRule Display Name
:return: The display_name of this ResponderRecipeResponderRule.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this ResponderRecipeResponderRule.
ResponderRule Display Name
:param display_name: The display_name of this ResponderRecipeResponderRule.
:type: str
"""
self._display_name = display_name
@property
def description(self):
"""
Gets the description of this ResponderRecipeResponderRule.
ResponderRule Description
:return: The description of this ResponderRecipeResponderRule.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this ResponderRecipeResponderRule.
ResponderRule Description
:param description: The description of this ResponderRecipeResponderRule.
:type: str
"""
self._description = description
@property
def type(self):
"""
Gets the type of this ResponderRecipeResponderRule.
Type of Responder
Allowed values for this property are: "REMEDIATION", "NOTIFICATION", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The type of this ResponderRecipeResponderRule.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this ResponderRecipeResponderRule.
Type of Responder
:param type: The type of this ResponderRecipeResponderRule.
:type: str
"""
allowed_values = ["REMEDIATION", "NOTIFICATION"]
if not value_allowed_none_or_none_sentinel(type, allowed_values):
type = 'UNKNOWN_ENUM_VALUE'
self._type = type
@property
def policies(self):
"""
Gets the policies of this ResponderRecipeResponderRule.
List of Policy
:return: The policies of this ResponderRecipeResponderRule.
:rtype: list[str]
"""
return self._policies
@policies.setter
def policies(self, policies):
"""
Sets the policies of this ResponderRecipeResponderRule.
List of Policy
:param policies: The policies of this ResponderRecipeResponderRule.
:type: list[str]
"""
self._policies = policies
@property
def supported_modes(self):
"""
Gets the supported_modes of this ResponderRecipeResponderRule.
Supported Execution Modes
Allowed values for items in this list are: "AUTOACTION", "USERACTION", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The supported_modes of this ResponderRecipeResponderRule.
:rtype: list[str]
"""
return self._supported_modes
@supported_modes.setter
def supported_modes(self, supported_modes):
"""
Sets the supported_modes of this ResponderRecipeResponderRule.
Supported Execution Modes
:param supported_modes: The supported_modes of this ResponderRecipeResponderRule.
:type: list[str]
"""
allowed_values = ["AUTOACTION", "USERACTION"]
if supported_modes:
supported_modes[:] = ['UNKNOWN_ENUM_VALUE' if not value_allowed_none_or_none_sentinel(x, allowed_values) else x for x in supported_modes]
self._supported_modes = supported_modes
@property
def details(self):
"""
Gets the details of this ResponderRecipeResponderRule.
:return: The details of this ResponderRecipeResponderRule.
:rtype: oci.cloud_guard.models.ResponderRuleDetails
"""
return self._details
@details.setter
def details(self, details):
"""
Sets the details of this ResponderRecipeResponderRule.
:param details: The details of this ResponderRecipeResponderRule.
:type: oci.cloud_guard.models.ResponderRuleDetails
"""
self._details = details
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this ResponderRecipeResponderRule.
Compartment Identifier
:return: The compartment_id of this ResponderRecipeResponderRule.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this ResponderRecipeResponderRule.
Compartment Identifier
:param compartment_id: The compartment_id of this ResponderRecipeResponderRule.
:type: str
"""
self._compartment_id = compartment_id
@property
def time_created(self):
"""
Gets the time_created of this ResponderRecipeResponderRule.
The date and time the responder recipe rule was created. Format defined by RFC3339.
:return: The time_created of this ResponderRecipeResponderRule.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this ResponderRecipeResponderRule.
The date and time the responder recipe rule was created. Format defined by RFC3339.
:param time_created: The time_created of this ResponderRecipeResponderRule.
:type: datetime
"""
self._time_created = time_created
@property
def time_updated(self):
"""
Gets the time_updated of this ResponderRecipeResponderRule.
The date and time the responder recipe rule was updated. Format defined by RFC3339.
:return: The time_updated of this ResponderRecipeResponderRule.
:rtype: datetime
"""
return self._time_updated
@time_updated.setter
def time_updated(self, time_updated):
"""
Sets the time_updated of this ResponderRecipeResponderRule.
The date and time the responder recipe rule was updated. Format defined by RFC3339.
:param time_updated: The time_updated of this ResponderRecipeResponderRule.
:type: datetime
"""
self._time_updated = time_updated
@property
def lifecycle_state(self):
"""
Gets the lifecycle_state of this ResponderRecipeResponderRule.
The current state of the ResponderRule.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this ResponderRecipeResponderRule.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this ResponderRecipeResponderRule.
The current state of the ResponderRule.
:param lifecycle_state: The lifecycle_state of this ResponderRecipeResponderRule.
:type: str
"""
allowed_values = ["CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def lifecycle_details(self):
"""
Gets the lifecycle_details of this ResponderRecipeResponderRule.
A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
:return: The lifecycle_details of this ResponderRecipeResponderRule.
:rtype: str
"""
return self._lifecycle_details
@lifecycle_details.setter
def lifecycle_details(self, lifecycle_details):
"""
Sets the lifecycle_details of this ResponderRecipeResponderRule.
A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
:param lifecycle_details: The lifecycle_details of this ResponderRecipeResponderRule.
:type: str
"""
self._lifecycle_details = lifecycle_details
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
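# Illustrative construction sketch (the values below are placeholders, not real
# identifiers; unrecognized enum values are mapped to 'UNKNOWN_ENUM_VALUE'):
# rule = ResponderRecipeResponderRule()
# rule.responder_rule_id = 'EXAMPLE-responderRuleId'
# rule.type = 'REMEDIATION'
# rule.supported_modes = ['AUTOACTION']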
| python | 16,869 |
from lab.distribution import CustomDistr
def main():
for distr in ["Normal", "Cauchy", "Laplace", "Poisson", "Uniform"]:
for size in [20, 100]:
obj = CustomDistr(distr, size)
obj.boxplot(1000)
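# Standard entry-point guard so the script actually runs when executed directly:
if __name__ == '__main__':
    main()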
| python | 231 |
from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
    """Allow users to edit their own profile"""
    def has_object_permission(self, request, view, obj):
        """Check whether the user is trying to edit their own profile"""
if request.method in permissions.SAFE_METHODS:
return True
return obj.id == request.user.id
class UpdateOwnStatus(permissions.BasePermission):
    """Allow users to update their own status"""
    def has_object_permission(self, request, view, obj):
        """Check whether the user is trying to update their own status"""
if request.method in permissions.SAFE_METHODS:
return True
return obj.user_profile.id == request.user.id
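# Minimal usage sketch -- ProfileViewSet, UserProfile and UserProfileSerializer
# are hypothetical names, not part of this module:
# from rest_framework import viewsets
# from rest_framework.authentication import TokenAuthentication
# class ProfileViewSet(viewsets.ModelViewSet):
#     queryset = UserProfile.objects.all()
#     serializer_class = UserProfileSerializer
#     authentication_classes = (TokenAuthentication,)
#     permission_classes = (UpdateOwnProfile,)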
| python | 673 |
import sys
import os
import logging
import subprocess
import time
import shlex
# from arm.config.config import cfg
from arm.ripper import utils # noqa: E402
from arm.ui import db
def makemkv(logfile, job):
"""
Rip Blurays with MakeMKV\n
logfile = Location of logfile to redirect MakeMKV logs to\n
job = job object\n
Returns path to ripped files.
"""
logging.info("Starting MakeMKV rip. Method is " + job.config.RIPMETHOD)
# get MakeMKV disc number
logging.debug("Getting MakeMKV disc number")
cmd = 'makemkvcon -r info disc:9999 |grep {0} |grep -oP \'(?<=:).*?(?=,)\''.format(
job.devpath
)
try:
mdisc = subprocess.check_output(
cmd,
shell=True
).decode("utf-8")
logging.info("MakeMKV disc number: " + mdisc.strip())
# print("mdisc is: " + mdisc)
except subprocess.CalledProcessError as mdisc_error:
err = "Call to makemkv failed with code: " + str(mdisc_error.returncode) + "(" + str(mdisc_error.output) + ")"
logging.error(err)
raise RuntimeError(err)
# get filesystem in order
# if tv series, include the disk label in folder path. usually contains DISK1, DISK2 etc.
# this means duplicate rips does not need to be enabled
if job.video_type == "series":
        rawpath = os.path.join(str(job.config.RAWPATH), str(job.title) + "__" + str(job.label))
else:
rawpath = os.path.join(str(job.config.RAWPATH), str(job.title))
logging.info("Destination is " + str(rawpath))
if not os.path.exists(rawpath):
try:
os.makedirs(rawpath)
except OSError:
# logging.error("Couldn't create the base file path: " + rawpath + " Probably a permissions error")
err = "Couldn't create the base file path: " + str(rawpath) + " Probably a permissions error"
else:
logging.info(rawpath + " exists. Adding timestamp.")
ts = round(time.time() * 100)
rawpath = os.path.join(str(job.config.RAWPATH), str(job.title) + "_" + str(ts))
logging.info("rawpath is " + str(rawpath))
try:
os.makedirs(rawpath)
except OSError:
# logging.error("Couldn't create the base file path: " + rawpath + " Probably a permissions error")
err = "Couldn't create the base file path: " + str(rawpath) + " Probably a permissions error"
sys.exit(err)
# rip bluray
if job.config.RIPMETHOD == "backup" and job.disctype == "bluray":
# backup method
cmd = 'makemkvcon backup --decrypt {0} -r disc:{1} {2}>> {3}'.format(
job.config.MKV_ARGS,
mdisc.strip(),
shlex.quote(rawpath),
logfile
)
logging.info("Backup up disc")
logging.debug("Backing up with the following command: " + cmd)
try:
mkv = subprocess.run(
cmd,
shell=True
)
# ).decode("utf-8")
# print("mkv is: " + mkv)
logging.debug("The exit code for MakeMKV is: " + str(mkv.returncode))
if mkv.returncode == 253:
# Makemkv is out of date
err = "MakeMKV version is too old. Upgrade and try again. MakeMKV returncode is '253'."
logging.error(err)
raise RuntimeError(err)
except subprocess.CalledProcessError as mdisc_error:
err = "Call to MakeMKV failed with code: " + str(mdisc_error.returncode) + "(" + str(mdisc_error.output) + ")"
logging.error(err)
# print("Error: " + mkv)
return None
elif job.config.RIPMETHOD == "mkv" or job.disctype == "dvd":
# mkv method
get_track_info(mdisc, job)
# if no maximum length, process the whole disc in one command
if int(job.config.MAXLENGTH) > 99998:
cmd = 'makemkvcon mkv {0} -r dev:{1} {2} {3} --minlength={4}>> {5}'.format(
job.config.MKV_ARGS,
job.devpath,
"all",
shlex.quote(rawpath),
job.config.MINLENGTH,
logfile
)
logging.debug("Ripping with the following command: " + cmd)
try:
mkv = subprocess.run(
cmd,
shell=True
)
# ).decode("utf-8")
# print("mkv is: " + mkv)
logging.debug("The exit code for MakeMKV is: " + str(mkv.returncode))
if mkv.returncode == 253:
# Makemkv is out of date
err = "MakeMKV version is too old. Upgrade and try again. MakeMKV returncode is '253'."
logging.error(err)
raise RuntimeError(err)
except subprocess.CalledProcessError as mdisc_error:
err = "Call to MakeMKV failed with code: " + str(mdisc_error.returncode) + "(" + str(mdisc_error.output) + ")"
logging.error(err)
# print("Error: " + mkv)
return None
else:
# process one track at a time based on track length
for track in job.tracks:
if track.length < int(job.config.MINLENGTH):
# too short
logging.info("Track #" + str(track.track_number) + " of " + str(job.no_of_titles) + ". Length (" + str(track.length) +
") is less than minimum length (" + job.config.MINLENGTH + "). Skipping")
elif track.length > int(job.config.MAXLENGTH):
# too long
logging.info("Track #" + str(track.track_number) + " of " + str(job.no_of_titles) + ". Length (" + str(track.length) +
") is greater than maximum length (" + job.config.MAXLENGTH + "). Skipping")
else:
# just right
logging.info("Processing track #" + str(track.track_number) + " of " + str(job.no_of_titles - 1) + ". Length is " +
str(track.length) + " seconds.")
# filename = "title_" + str.zfill(str(track.track_number), 2) + "." + cfg['DEST_EXT']
# filename = track.filename
filepathname = os.path.join(rawpath, track.filename)
logging.info("Ripping title " + str(track.track_number) + " to " + shlex.quote(filepathname))
# track.filename = track.orig_filename = filename
# db.session.commit()
cmd = 'makemkvcon mkv {0} -r dev:{1} {2} {3} --minlength={4}>> {5}'.format(
job.config.MKV_ARGS,
job.devpath,
str(track.track_number),
shlex.quote(rawpath),
job.config.MINLENGTH,
logfile
)
logging.debug("Ripping with the following command: " + cmd)
try:
mkv = subprocess.run(
cmd,
shell=True
)
# ).decode("utf-8")
# print("mkv is: " + mkv)
logging.debug("The exit code for MakeMKV is: " + str(mkv.returncode))
if mkv.returncode == 253:
# Makemkv is out of date
err = "MakeMKV version is too old. Upgrade and try again. MakeMKV returncode is '253'."
logging.error(err)
raise RuntimeError(err)
except subprocess.CalledProcessError as mdisc_error:
err = "Call to MakeMKV failed with code: " + str(mdisc_error.returncode) + "(" + str(mdisc_error.output) + ")"
logging.error(err)
# print("Error: " + mkv)
return None
else:
logging.info("I'm confused what to do.... Passing on MakeMKV")
job.eject()
logging.info("Exiting MakeMKV processing with return value of: " + rawpath)
    return rawpath
def get_track_info(mdisc, job):
"""Use MakeMKV to get track info and updatte Track class\n
mdisc = MakeMKV disc number\n
job = Job instance\n
"""
logging.info("Using MakeMKV to get information on all the tracks on the disc. This will take a few minutes...")
cmd = 'makemkvcon -r --cache=1 info disc:{0}'.format(
mdisc
)
logging.debug("Sending command: %s", (cmd))
try:
mkv = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT,
shell=True
).decode("utf-8").splitlines()
except subprocess.CalledProcessError as mdisc_error:
err = "Call to MakeMKV failed with code: " + str(mdisc_error.returncode) + "(" + str(mdisc_error.output) + ")"
logging.error(err)
return None
track = 0
fps = float(0)
aspect = ""
seconds = 0
filename = ""
for line in mkv:
if line.split(":")[0] in ("MSG", "TCOUNT", "CINFO", "TINFO", "SINFO"):
# print(line.rstrip())
line_split = line.split(":", 1)
msg_type = line_split[0]
msg = line_split[1].split(",")
line_track = int(msg[0])
if msg_type == "MSG":
if msg[0] == "5055":
job.errors = "MakeMKV evaluation period has expired. DVD processing will continus. Bluray processing will exit."
if job.disctype == "bluray":
err = "MakeMKV evaluation period has expired. Disc is a Bluray so ARM is exiting"
logging.error(err)
raise ValueError(err, "makemkv")
else:
logging.error("MakeMKV evaluation perios has ecpires. Disc is dvd so ARM will continue")
db.session.commit()
if msg_type == "TCOUNT":
titles = int(line_split[1].strip())
logging.info("Found " + str(titles) + " titles")
job.no_of_titles = titles
db.session.commit()
if msg_type == "TINFO":
if track != line_track:
if line_track == int(0):
pass
else:
utils.put_track(job, track, seconds, aspect, fps, False, "makemkv", filename)
track = line_track
if msg[1] == "27":
filename = msg[3].replace('"', '').strip()
if msg_type == "TINFO" and msg[1] == "9":
len_hms = msg[3].replace('"', '').strip()
h, m, s = len_hms.split(':')
seconds = int(h) * 3600 + int(m) * 60 + int(s)
if msg_type == "SINFO" and msg[1] == "0":
if msg[2] == "20":
aspect = msg[4].replace('"', '').strip()
elif msg[2] == "21":
fps = msg[4].split()[0]
fps = fps.replace('"', '').strip()
fps = float(fps)
utils.put_track(job, track, seconds, aspect, fps, False, "makemkv", filename)
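# Illustrative makemkvcon "robot" output lines as parsed above (field meanings
# are inferred from the parsing logic; the sample values are made up):
#   TCOUNT:2
#   TINFO:0,27,0,"title_00.mkv"          -> filename of title 0 (attribute 27)
#   TINFO:0,9,0,"1:32:45"                -> duration of title 0 (attribute 9)
#   SINFO:0,0,20,0,"16:9"                -> aspect ratio (attribute 20)
#   SINFO:0,0,21,0,"23.976 (24000/1001)" -> frame rate (attribute 21)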
| python | 11,426 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetDeploymentResult',
'AwaitableGetDeploymentResult',
'get_deployment',
]
@pulumi.output_type
class GetDeploymentResult:
"""
Deployment information.
"""
def __init__(__self__, location=None, name=None, properties=None, tags=None, type=None):
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
the location of the deployment.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the deployment.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.DeploymentPropertiesExtendedResponse':
"""
Deployment properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Deployment tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the deployment.
"""
return pulumi.get(self, "type")
class AwaitableGetDeploymentResult(GetDeploymentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDeploymentResult(
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_deployment(deployment_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDeploymentResult:
"""
Use this data source to access information about an existing resource.
:param str deployment_name: The name of the deployment.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['deploymentName'] = deployment_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:resources/v20200601:getDeployment', __args__, opts=opts, typ=GetDeploymentResult).value
return AwaitableGetDeploymentResult(
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
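# Illustrative call (the names are placeholders):
# result = get_deployment(deployment_name='my-deployment',
#                         resource_group_name='my-resource-group')
# pulumi.export('deployment_type', result.type)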
| python | 3,787 |
from simple_rpc import _get_metadata
author = _get_metadata('Author')
copyright = _get_metadata('Author')
project = _get_metadata('Name')
release = _get_metadata('Version')
autoclass_content = 'both'
extensions = ['sphinx.ext.autodoc', 'sphinx_autodoc_typehints', 'sphinxarg.ext']
master_doc = 'index'
| python | 305 |
import atexit
import os
import shutil
import tempfile
from django import VERSION
from django.conf import settings, global_settings
# Silence the warning about an insecure SECRET_KEY
global_settings.SECRET_KEY = 'SUPER_SAFE_TESTING_KEY'
settings.configure(default_settings=global_settings)
from graphite.settings import * # noqa
if VERSION < (1, 6):
TEST_RUNNER = 'discover_runner.DiscoverRunner'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
}
# Temporary directories
def atexit_tmpremover(dirname):
""" Utility to remove a temporary directory during program exit. """
try:
shutil.rmtree(dirname)
print("Removed temporary directory: %s" % dirname)
except OSError:
# if the temp dir was removed already by other means
pass
# create a temporary directory
TEMP_GRAPHITE_DIR = tempfile.mkdtemp(prefix='graphite-test-')
atexit.register(atexit_tmpremover, TEMP_GRAPHITE_DIR)
LOG_DIR = os.path.join(TEMP_GRAPHITE_DIR, 'log')
os.mkdir(LOG_DIR)
WHISPER_DIR = os.path.join(TEMP_GRAPHITE_DIR, 'whisper/')
os.mkdir(WHISPER_DIR)
# Manually add WHISPER_DIR to STANDARD_DIRS
# STANDARD_DIRS is generated programmatically in settings.py; the modification of
# WHISPER_DIR above does not change the value in STANDARD_DIRS.
STANDARD_DIRS = [WHISPER_DIR]
INDEX_FILE = os.path.join(TEMP_GRAPHITE_DIR, 'index')
URL_PREFIX = '/graphite'
| python | 1,431 |
# This file was auto generated; Do not modify, if you value your sanity!
import ctypes
class s_disk_format_progress(ctypes.Structure):
_pack_ = 2
_fields_ = [
('state', ctypes.c_uint16),
('sectorsRemaining', ctypes.c_uint8 * 8),
]
# Extra names go here:
SDiskFormatProgress = s_disk_format_progress
# End of extra names
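# Illustrative use (with _pack_ = 2 the struct occupies 2 + 8 = 10 bytes):
# progress = SDiskFormatProgress()
# progress.state = 1
# assert ctypes.sizeof(progress) == 10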
| python | 353 |
# util function
import hashlib
def wash_string(s):
'''
    Remove newline, tab and carriage-return characters, then strip surrounding spaces.
'''
s = s.replace('\n', '')
s = s.replace('\t', '')
s = s.replace('\r', '')
return s.strip(' ')
def encode_url(url):
    '''Return the MD5 hex digest of a URL.'''
hash_md5 = hashlib.md5(url.encode('utf-8'))
return hash_md5.hexdigest()
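# Example behaviour (illustrative):
#   wash_string(' a\tb\nc ')          -> 'abc'
#   encode_url('https://example.com') -> 32-character MD5 hex digest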
| python | 345 |
"""
ASGI config for vycenter project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vycenter.settings')
application = get_asgi_application()
| python | 393 |
import os
import pkg_resources
from setuptools import setup
PATH_ROOT = os.path.realpath(os.path.dirname(__file__))
REQUIREMENTS_PATH = os.path.join(PATH_ROOT, 'requirements.txt')
def parse_requirements(path: str):
with open(path) as fp:
text = fp.read()
requirements = [
str(requirement) for requirement
in pkg_resources.parse_requirements(text)
]
return requirements
setup(
name='squeezer',
version='0.1.0',
packages=['squeezer'],
url='https://github.com/esceptico/squeezer',
license='MIT',
author_email='[email protected]',
description='Lightweight knowledge distillation pipeline',
install_requires=parse_requirements(REQUIREMENTS_PATH)
)
| python | 724 |
from .parameters import Parameter, File, Source, Self
from .task import Task, cmd
from .workflow import Workflow, set_Threads
from .log import Log
| python | 147 |
from turtle import *
import random
import pygame
pygame.mixer.init()
pygame.mixer.music.load("md.mp3")
pygame.mixer.music.play()
w=Turtle()
s=Screen()
w.penup()
w.setpos(-450,-999)
w.pendown()
w.lt(135)
w.speed(0)
def border():
w.fd(50)
w.rt(90)
w.fd(50)
w.lt(90)
def borfunc(fl,sl):
for i in range(2):
for i in range(fl):
border()
w.rt(90)
for i in range(sl):
border()
w.rt(90)
w.color("red")
borfunc(28,13)
w.color("white")
w.home()
w.lt(90)
w.speed(1)
w.color("red")
w.setpos(0,-300)
w.pensize(15)
w.fd(600)
w.penup()
w.setpos(0,300)
w.pendown()
w.lt(90)
for i in range(45):
w.lt(2)
w.bk(5)
w.bk(100)
w.rt(60)
w.fd(25)
w.lt(150)
w.fd(40)
w.lt(150)
w.fd(25)
for i in range(5):
w.lt(2)
w.bk(5)
w.penup()
w.setpos(0,300)
w.pendown()
w.rt(160)
for i in range(45):
w.rt(2)
w.bk(5)
w.bk(100)
w.rt(60)
w.fd(25)
w.lt(150)
w.fd(40)
w.lt(150)
w.fd(25)
w.penup()
w.setpos(0,300)
w.pendown()
w.rt(60)
w.fd(280)
w.lt(135)
for i in range(4):
w.fd(25)
w.lt(90)
w.penup()
w.setpos(0,140)
w.pendown()
w.lt(45)
w.fd(75)
w.rt(135)
w.fd(150)
w.lt(135)
w.fd(200)
w.lt(135)
w.fd(300)
w.rt(135)
w.fd(200)
w.rt(130)
w.fd(150)
w.penup()
w.setpos(0,-300)
w.pendown()
w.color("red","red")
w.begin_fill()
w.lt(85)
w.fd(50)
w.lt(135)
w.fd(75)
w.lt(135)
w.fd(50)
w.end_fill()
w.penup()
w.setpos(0,900)
w.pendown()
w.rt(135)
#omkar
w.fd(100)
w.bk(200)
w.fd(100)
for i in range(1):
    # first arc
for i in range(90):
w.rt(2)
w.fd(2)
w.rt(180)
    # 2nd arc
for i in range(100):
w.rt(2)
w.fd(2)
w.lt(20)
w.penup()
w.setpos(0,790)
w.pendown()
w.rt(180)
w.pensize(8)
for i in range(40):
w.rt(2)
w.fd(3)
for i in range(60):
w.lt(2)
w.fd(3)
#chandrabindu
w.pensize(7)
w.penup()
w.setpos(86,986)
w.pendown()
w.rt(130)
for i in range(90):
w.rt(2)
w.fd(3)
w.penup()
w.setpos(10,945)
w.pendown()
w.color("red")
w.begin_fill()
w.circle(10)
w.end_fill()
done() | python | 2,138 |
# -*- coding: utf-8 -*-
r"""
Chain complexes
AUTHORS:
- John H. Palmieri (2009-04)
This module implements bounded chain complexes of free `R`-modules,
for any commutative ring `R` (although the interesting things, like
homology, only work if `R` is the integers or a field).
Fix a ring `R`. A chain complex over `R` is a collection of
`R`-modules `\{C_n\}` indexed by the integers, with `R`-module maps
`d_n : C_n \rightarrow C_{n+1}` such that `d_{n+1} \circ d_n = 0` for
all `n`. The maps `d_n` are called *differentials*.
One can vary this somewhat: the differentials may decrease degree by
one instead of increasing it: sometimes a chain complex is defined
with `d_n : C_n \rightarrow C_{n-1}` for each `n`. Indeed, the
differentials may change dimension by any fixed integer.
Also, the modules may be indexed over an abelian group other than the
integers, e.g., `\ZZ^{m}` for some integer `m \geq 1`, in which case
the differentials may change the grading by any element of that
grading group. The elements of the grading group are generally called
degrees, so `C_n` is the module in degree `n` and so on.
In this implementation, the ring `R` must be commutative and the
modules `C_n` must be free `R`-modules. As noted above, homology
calculations will only work if the ring `R` is either `\ZZ` or a
field. The modules may be indexed by any free abelian group. The
differentials may increase degree by 1 or decrease it, or indeed
change it by any fixed amount: this is controlled by the
``degree_of_differential`` parameter used in defining the chain
complex.
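A minimal example of the default conventions (grading over `\ZZ`,
differentials of degree 1), mirroring the doctests later in this file::
    sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
    sage: C
    Chain complex with at most 2 nonzero terms over Integer Ring
    sage: C.degree_of_differential()
    1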
"""
########################################################################
# Copyright (C) 2013 John H. Palmieri <[email protected]>
# Volker Braun <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# http://www.gnu.org/licenses/
########################################################################
from six import iteritems
from copy import copy
from sage.structure.parent import Parent
from sage.structure.element import ModuleElement, is_Vector, coercion_model
from sage.misc.cachefunc import cached_method
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.modules.free_module import FreeModule
from sage.modules.free_module_element import vector
from sage.matrix.matrix0 import Matrix
from sage.matrix.constructor import matrix
from sage.misc.latex import latex
from sage.rings.all import GF, prime_range
from sage.misc.decorators import rename_keyword
from sage.homology.homology_group import HomologyGroup
from functools import reduce
def _latex_module(R, m):
"""
LaTeX string representing a free module over ``R`` of rank ``m``.
INPUT:
- ``R`` -- a commutative ring
- ``m`` -- non-negative integer
This is used by the ``_latex_`` method for chain complexes.
EXAMPLES::
sage: from sage.homology.chain_complex import _latex_module
sage: _latex_module(ZZ, 3)
'\\Bold{Z}^{3}'
sage: _latex_module(ZZ, 0)
'0'
sage: _latex_module(GF(3), 1)
'\\Bold{F}_{3}^{1}'
"""
if m == 0:
return str(latex(0))
return str(latex(FreeModule(R, m)))
@rename_keyword(deprecation=15151, check_products='check', check_diffs='check')
def ChainComplex(data=None, base_ring=None, grading_group=None,
degree_of_differential=1, degree=1,
check=True):
r"""
Define a chain complex.
INPUT:
- ``data`` -- the data defining the chain complex; see below for
more details.
The following keyword arguments are supported:
- ``base_ring`` -- a commutative ring (optional), the ring over
which the chain complex is defined. If this is not specified,
it is determined by the data defining the chain complex.
    - ``grading_group`` -- an additive free abelian group (optional,
default ``ZZ``), the group over which the chain complex is
indexed.
- ``degree_of_differential`` -- element of grading_group
(optional, default ``1``). The degree of the differential.
- ``degree`` -- alias for ``degree_of_differential``.
- ``check`` -- boolean (optional, default ``True``). If ``True``,
check that each consecutive pair of differentials are
composable and have composite equal to zero.
OUTPUT:
A chain complex.
.. WARNING::
Right now, homology calculations will only work if the base
ring is either `\ZZ` or a field, so please take this into account
when defining a chain complex.
Use data to define the chain complex. This may be in any of the
following forms.
1. a dictionary with integers (or more generally, elements of
grading_group) for keys, and with ``data[n]`` a matrix representing
(via left multiplication) the differential coming from degree
`n`. (Note that the shape of the matrix then determines the
rank of the free modules `C_n` and `C_{n+d}`.)
2. a list/tuple/iterable of the form `[C_0, d_0, C_1, d_1, C_2,
d_2, ...]`, where each `C_i` is a free module and each `d_i` is
a matrix, as above. This only makes sense if ``grading_group``
is `\ZZ` and ``degree`` is 1.
3. a list/tuple/iterable of the form `[r_0, d_0, r_1, d_1, r_2,
d_2, \ldots]`, where `r_i` is the rank of the free module `C_i`
and each `d_i` is a matrix, as above. This only makes sense if
``grading_group`` is `\ZZ` and ``degree`` is 1.
4. a list/tuple/iterable of the form `[d_0, d_1, d_2, \ldots]` where
each `d_i` is a matrix, as above. This only makes sense if
``grading_group`` is `\ZZ` and ``degree`` is 1.
.. NOTE::
In fact, the free modules `C_i` in case 2 and the ranks `r_i`
in case 3 are ignored: only the matrices are kept, and from
their shapes, the ranks of the modules are determined.
(Indeed, if ``data`` is a list or tuple, then any element which
is not a matrix is discarded; thus the list may have any number
of different things in it, and all of the non-matrices will be
ignored.) No error checking is done to make sure, for
instance, that the given modules have the appropriate ranks for
the given matrices. However, as long as ``check`` is True, the
code checks to see if the matrices are composable and that each
appropriate composite is zero.
If the base ring is not specified, then the matrices are examined
to determine a ring over which they are all naturally defined, and
this becomes the base ring for the complex. If no such ring can
be found, an error is raised. If the base ring is specified, then
the matrices are converted automatically to this ring when
defining the chain complex. If some matrix cannot be converted,
then an error is raised.
EXAMPLES::
sage: ChainComplex()
Trivial chain complex over Integer Ring
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: C
Chain complex with at most 2 nonzero terms over Integer Ring
sage: m = matrix(ZZ, 2, 2, [0, 1, 0, 0])
sage: D = ChainComplex([m, m], base_ring=GF(2)); D
Chain complex with at most 3 nonzero terms over Finite Field of size 2
sage: D == loads(dumps(D))
True
sage: D.differential(0)==m, m.is_immutable(), D.differential(0).is_immutable()
(True, False, True)
Note that when a chain complex is defined in Sage, new
differentials may be created: every nonzero module in the chain
complex must have a differential coming from it, even if that
differential is zero::
sage: IZ = ChainComplex({0: identity_matrix(ZZ, 1)})
sage: IZ.differential() # the differentials in the chain complex
{-1: [], 0: [1], 1: []}
sage: IZ.differential(1).parent()
Full MatrixSpace of 0 by 1 dense matrices over Integer Ring
sage: mat = ChainComplex({0: matrix(ZZ, 3, 4)}).differential(1)
sage: mat.nrows(), mat.ncols()
(0, 3)
Defining the base ring implicitly::
sage: ChainComplex([matrix(QQ, 3, 1), matrix(ZZ, 4, 3)])
Chain complex with at most 3 nonzero terms over Rational Field
sage: ChainComplex([matrix(GF(125, 'a'), 3, 1), matrix(ZZ, 4, 3)])
Chain complex with at most 3 nonzero terms over Finite Field in a of size 5^3
If the matrices are defined over incompatible rings, an error results::
sage: ChainComplex([matrix(GF(125, 'a'), 3, 1), matrix(QQ, 4, 3)])
Traceback (most recent call last):
...
TypeError: no common canonical parent for objects with parents: 'Finite Field in a of size 5^3' and 'Rational Field'
If the base ring is given explicitly but is not compatible with
the matrices, an error results::
sage: ChainComplex([matrix(GF(125, 'a'), 3, 1)], base_ring=QQ)
Traceback (most recent call last):
...
TypeError: unable to convert 0 to a rational
"""
if grading_group is None:
grading_group = ZZ
if degree_of_differential != 1 and degree != 1:
raise ValueError('specify only one of degree_of_differential or degree, not both')
if degree_of_differential != 1:
degree = degree_of_differential
try:
degree = grading_group(degree)
except Exception:
raise ValueError('degree is not an element of the grading group')
# transform data into data_dict
if data is None or (isinstance(data, (list, tuple)) and len(data) == 0):
data_dict = {}
elif isinstance(data, dict): # data is dictionary
data_dict = data
else: # data is list/tuple/iterable
data_matrices = [x for x in data if isinstance(x, Matrix)]
if degree != 1:
raise ValueError('degree must be +1 if the data argument is a list or tuple')
if grading_group != ZZ:
raise ValueError('grading_group must be ZZ if the data argument is a list or tuple')
data_dict = {grading_group(i): m for i, m in enumerate(data_matrices)}
if base_ring is None:
if not data_dict:
base_ring = ZZ
else:
bases = tuple(x.base_ring() for x in data_dict.values())
base_ring = coercion_model.common_parent(*bases)
# make sure values in data_dict are appropriate matrices
for n in list(data_dict):
        if n not in grading_group:
raise ValueError('one of the dictionary keys is not an element of the grading group')
mat = data_dict[n]
if not isinstance(mat, Matrix):
raise TypeError('one of the differentials in the data is not a matrix')
if mat.base_ring() is base_ring:
if not mat.is_immutable():
mat = copy(mat) # do not make any arguments passed immutable
mat.set_immutable()
else:
mat = mat.change_ring(base_ring)
mat.set_immutable()
data_dict[n] = mat
# include any "obvious" zero matrices that are not 0x0
for n in list(data_dict): # note: data_dict will be mutated in this loop
mat1 = data_dict[n]
if (mat1.nrows(), mat1.ncols()) == (0, 0):
del data_dict[n]
if (mat1.nrows() != 0) and (n+degree not in data_dict):
if n+2*degree in data_dict:
mat2 = matrix(base_ring, data_dict[n+2*degree].ncols(), mat1.nrows())
else:
mat2 = matrix(base_ring, 0, mat1.nrows())
mat2.set_immutable()
data_dict[n+degree] = mat2
if (mat1.ncols() != 0) and (n-degree not in data_dict):
if n-2*degree in data_dict:
mat0 = matrix(base_ring, mat1.ncols(), data_dict[n-2*degree].nrows())
else:
mat0 = matrix(base_ring, mat1.ncols(), 0)
mat0.set_immutable()
data_dict[n-degree] = mat0
# check that this is a complex: going twice is zero
if check:
for n in data_dict:
mat0 = data_dict[n]
try:
mat1 = data_dict[n+degree]
except KeyError:
continue
try:
prod = mat1 * mat0
except TypeError:
raise TypeError('the differentials d_{{{}}} and d_{{{}}} are not compatible: '
'their product is not defined'.format(n, n+degree))
if not prod.is_zero():
raise ValueError('the differentials d_{{{}}} and d_{{{}}} are not compatible: '
'their composition is not zero.'.format(n, n+degree))
return ChainComplex_class(grading_group, degree, base_ring, data_dict)
class Chain_class(ModuleElement):
def __init__(self, parent, vectors, check=True):
r"""
A Chain in a Chain Complex
A chain is collection of module elements for each module `C_n`
of the chain complex `(C_n, d_n)`. There is no restriction on
how the differentials `d_n` act on the elements of the chain.
.. NOTE::
You must use the chain complex to construct chains.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])}, base_ring=GF(7))
sage: C.category()
Category of chain complexes over Finite Field of size 7
TESTS::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: c = C({0:vector([0, 1, 2]), 1:vector([3, 4])})
sage: TestSuite(c).run()
"""
# only nonzero vectors shall be stored, ensuring this is the
# job of the _element constructor_
assert all(v.is_immutable() and not v.is_zero()
and v.base_ring() is parent.base_ring()
for v in vectors.values())
self._vec = vectors
super(Chain_class, self).__init__(parent)
def vector(self, degree):
"""
Return the free module element in ``degree``.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: c = C({0:vector([1, 2, 3]), 1:vector([4, 5])})
sage: c.vector(0)
(1, 2, 3)
sage: c.vector(1)
(4, 5)
sage: c.vector(2)
()
"""
try:
return self._vec[degree]
except KeyError:
return self.parent().free_module(degree).zero()
def _repr_(self):
"""
Print representation.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: C()
Trivial chain
sage: C({0:vector([1, 2, 3])})
Chain(0:(1, 2, 3))
sage: c = C({0:vector([1, 2, 3]), 1:vector([4, 5])}); c
Chain with 2 nonzero terms over Integer Ring
sage: c._repr_()
'Chain with 2 nonzero terms over Integer Ring'
"""
n = len(self._vec)
if n == 0:
return 'Trivial chain'
if n == 1:
deg, vec = next(iteritems(self._vec))
return 'Chain({0}:{1})'.format(deg, vec)
return 'Chain with {0} nonzero terms over {1}'.format(
n, self.parent().base_ring())
def _ascii_art_(self):
"""
Return an ascii art representation.
Note that arrows go to the left so that composition of
differentials is the usual matrix multiplication.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0]), 1:zero_matrix(1,2)})
sage: c = C({0:vector([1, 2, 3]), 1:vector([4, 5])})
sage: ascii_art(c)
d_2 d_1 d_0 [1] d_-1
0 <---- [0] <---- [4] <---- [2] <----- 0
[5] [3]
"""
from sage.typeset.ascii_art import AsciiArt
def arrow_art(d):
d_str = [' d_{0} '.format(d)]
arrow = ' <' + '-'*(len(d_str[0])-3) + ' '
d_str.append(arrow)
return AsciiArt(d_str, baseline=0)
def vector_art(d):
v = self.vector(d)
if v.degree() == 0:
return AsciiArt(['0'])
v = str(v.column()).splitlines()
return AsciiArt(v, baseline=len(v)//2)
result = []
chain_complex = self.parent()
for ordered in chain_complex.ordered_degrees():
ordered = list(reversed(ordered))
if len(ordered) == 0:
return AsciiArt(['0'])
result_ordered = vector_art(ordered[0] + chain_complex.degree_of_differential())
for n in ordered:
result_ordered += arrow_art(n) + vector_art(n)
result = [result_ordered] + result
concatenated = result[0]
for r in result[1:]:
concatenated += AsciiArt([' ... ']) + r
return concatenated
def _unicode_art_(self):
"""
Return a unicode art representation.
Note that arrows go to the left so that composition of
differentials is the usual matrix multiplication.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0]), 1:zero_matrix(1,2)})
sage: c = C({0:vector([1, 2, 3]), 1:vector([4, 5])})
sage: unicode_art(c)
⎛1⎞
d_2 d_1 ⎛4⎞ d_0 ⎜2⎟ d_-1
0 ⟵──── (0) ⟵──── ⎝5⎠ ⟵──── ⎝3⎠ ⟵───── 0
"""
from sage.typeset.unicode_art import UnicodeArt
def arrow_art(d):
d_str = [u' d_{0} '.format(d)]
arrow = u' ⟵' + u'─' * (len(d_str[0]) - 3) + u' '
d_str.append(arrow)
return UnicodeArt(d_str, baseline=0)
def vector_art(d):
v = self.vector(d)
if not v.degree():
return UnicodeArt([u'0'])
w = matrix(v).transpose()
return w._unicode_art_()
result = []
chain_complex = self.parent()
for ordered in chain_complex.ordered_degrees():
ordered = list(reversed(ordered))
if not ordered:
return UnicodeArt([u'0'])
result_ordered = vector_art(ordered[0] +
chain_complex.degree_of_differential())
for n in ordered:
result_ordered += arrow_art(n) + vector_art(n)
result = [result_ordered] + result
concatenated = result[0]
for r in result[1:]:
concatenated += UnicodeArt([u' ... ']) + r
return concatenated
def is_cycle(self):
"""
Return whether the chain is a cycle.
OUTPUT:
Boolean. Whether the elements of the chain are in the kernel
of the differentials.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: c = C({0:vector([0, 1, 2]), 1:vector([3, 4])})
sage: c.is_cycle()
True
"""
chain_complex = self.parent()
for d, v in iteritems(self._vec):
dv = chain_complex.differential(d) * v
if not dv.is_zero():
return False
return True
def is_boundary(self):
"""
Return whether the chain is a boundary.
OUTPUT:
Boolean. Whether the elements of the chain are in the image of
the differentials.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: c = C({0:vector([0, 1, 2]), 1:vector([3, 4])})
sage: c.is_boundary()
False
sage: z3 = C({1:(1, 0)})
sage: z3.is_cycle()
True
sage: (2*z3).is_boundary()
False
sage: (3*z3).is_boundary()
True
"""
chain_complex = self.parent()
for d, v in iteritems(self._vec):
            m = chain_complex.differential(d - chain_complex.degree_of_differential()).transpose()
            if v not in m.image():
return False
return True
def _add_(self, other):
"""
Module addition
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: c = C({0:vector([0, 1, 2]), 1:vector([3, 4])})
sage: c + c
Chain with 2 nonzero terms over Integer Ring
sage: ascii_art(c + c)
d_1 d_0 [0] d_-1
0 <---- [6] <---- [2] <----- 0
[8] [4]
"""
vectors = {}
for d in set(list(self._vec) + list(other._vec)):
v = self.vector(d) + other.vector(d)
if not v.is_zero():
v.set_immutable()
vectors[d] = v
parent = self.parent()
return parent.element_class(parent, vectors)
def _lmul_(self, scalar):
"""
Scalar multiplication
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: c = C({0:vector([0, 1, 2]), 1:vector([3, 4])})
sage: 2 * c
Chain with 2 nonzero terms over Integer Ring
sage: 2 * c == c + c == c * 2
True
"""
vectors = dict()
for d, v in iteritems(self._vec):
v = scalar * v
if not v.is_zero():
v.set_immutable()
vectors[d] = v
parent = self.parent()
return parent.element_class(parent, vectors)
def __eq__(self, other):
"""
Return ``True`` if this chain is equal to ``other``.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: c = C({0:vector([0, 1, 2]), 1:vector([3, 4])})
sage: c == c
True
sage: c == C(0)
False
"""
if type(self) != type(other) or self.parent() != other.parent():
return False
return self._vec == other._vec
def __ne__(self, other):
"""
Return ``True`` if this chain is not equal to ``other``.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: c = C({0:vector([0, 1, 2]), 1:vector([3, 4])})
sage: c != c
False
sage: c != C(0)
True
"""
return not self == other
class ChainComplex_class(Parent):
r"""
See :func:`ChainComplex` for full documentation.
The differentials are required to be in the following canonical form:
    * All differentials that are not `0 \times 0` must be specified
      (even if they have zero rows or zero columns),
    * differentials that are `0 \times 0` must not be specified, and
    * all differentials must be immutable matrices over the ``base_ring``.
This and more is ensured by the assertions in the
constructor. The :func:`ChainComplex` factory function must
ensure that only valid input is passed.
EXAMPLES::
sage: C = ChainComplex(); C
Trivial chain complex over Integer Ring
sage: D = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: D
Chain complex with at most 2 nonzero terms over Integer Ring
"""
def __init__(self, grading_group, degree_of_differential, base_ring, differentials):
"""
Initialize ``self``.
TESTS::
sage: ChainComplex().base_ring()
Integer Ring
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: TestSuite(C).run()
"""
if any(d.base_ring() != base_ring or not d.is_immutable() or
(d.ncols(), d.nrows()) == (0, 0)
for d in differentials.values()):
raise ValueError('invalid differentials')
if degree_of_differential.parent() is not grading_group:
raise ValueError('the degree_of_differential.parent() must be grading_group')
if grading_group is not ZZ and grading_group.is_multiplicative():
            raise ValueError('grading_group must be either ZZ or additive')
# all differentials (excluding the 0x0 ones) must be specified to the constructor
if any(dim+degree_of_differential not in differentials and d.nrows() != 0
for dim, d in iteritems(differentials)):
raise ValueError('invalid differentials')
if any(dim-degree_of_differential not in differentials and d.ncols() != 0
for dim, d in iteritems(differentials)):
raise ValueError('invalid differentials')
self._grading_group = grading_group
self._degree_of_differential = degree_of_differential
self._diff = differentials
from sage.categories.all import ChainComplexes
category = ChainComplexes(base_ring)
super(ChainComplex_class, self).__init__(base=base_ring, category=category)
Element = Chain_class
def _element_constructor_(self, vectors, check=True):
"""
The element constructor.
This is part of the Parent/Element framework. Calling the
parent uses this method to construct elements.
TESTS::
sage: D = ChainComplex({0: matrix(ZZ, 2, 2, [1,0,0,2])})
sage: D._element_constructor_(0)
Trivial chain
sage: D({0:[2, 3]})
Chain(0:(2, 3))
"""
if not vectors: # special case: the zero chain
return self.element_class(self, {})
if isinstance(vectors, Chain_class):
vectors = vectors._vec
data = dict()
for degree, vec in iteritems(vectors):
if not is_Vector(vec):
vec = vector(self.base_ring(), vec)
vec.set_immutable()
if check and vec.degree() != self.free_module_rank(degree):
raise ValueError('vector dimension does not match module dimension')
if vec.is_zero():
continue
if vec.base_ring() != self.base_ring():
vec = vec.change_ring(self.base_ring())
if not vec.is_immutable():
vec = copy(vec)
vec.set_immutable()
data[degree] = vec
return self.element_class(self, data)
def random_element(self):
"""
Return a random element.
EXAMPLES::
sage: D = ChainComplex({0: matrix(ZZ, 2, 2, [1,0,0,2])})
sage: D.random_element() # random output
Chain with 1 nonzero terms over Integer Ring
"""
vec = dict()
for d in self.nonzero_degrees():
vec[d] = self.free_module(d).random_element()
return self(vec)
_an_element_ = random_element
@cached_method
def rank(self, degree, ring=None):
r"""
Return the rank of a differential
INPUT:
- ``degree`` -- an element `\delta` of the grading
group. Which differential `d_{\delta}` we want to know the
rank of
- ``ring`` -- (optional) a commutative ring `S`;
if specified, the rank is computed after changing to this ring
OUTPUT:
The rank of the differential `d_{\delta} \otimes_R S`, where
`R` is the base ring of the chain complex.
EXAMPLES::
sage: C = ChainComplex({0:matrix(ZZ, [[2]])})
sage: C.differential(0)
[2]
sage: C.rank(0)
1
sage: C.rank(0, ring=GF(2))
0
"""
degree = self.grading_group()(degree)
try:
d = self._diff[degree]
        except KeyError:
return ZZ.zero()
if d.nrows() == 0 or d.ncols() == 0:
return ZZ.zero()
if ring is None:
return d.rank()
return d.change_ring(ring).rank()
def grading_group(self):
r"""
Return the grading group.
OUTPUT:
The discrete abelian group that indexes the individual modules
of the complex. Usually `\ZZ`.
EXAMPLES::
sage: G = AdditiveAbelianGroup([0, 3])
sage: C = ChainComplex(grading_group=G, degree=G(vector([1,2])))
sage: C.grading_group()
Additive abelian group isomorphic to Z + Z/3
sage: C.degree_of_differential()
(1, 2)
"""
return self._grading_group
@cached_method
def nonzero_degrees(self):
r"""
Return the degrees in which the module is non-trivial.
See also :meth:`ordered_degrees`.
OUTPUT:
The tuple containing all degrees `n` (grading group elements)
such that the module `C_n` of the chain is non-trivial.
EXAMPLES::
sage: one = matrix(ZZ, [[1]])
sage: D = ChainComplex({0: one, 2: one, 6:one})
sage: ascii_art(D)
[1] [1] [0] [1]
0 <-- C_7 <---- C_6 <-- 0 ... 0 <-- C_3 <---- C_2 <---- C_1 <---- C_0 <-- 0
sage: D.nonzero_degrees()
(0, 1, 2, 3, 6, 7)
"""
return tuple(sorted(n for n, d in iteritems(self._diff)
if d.ncols()))
@cached_method
def ordered_degrees(self, start=None, exclude_first=False):
r"""
Sort the degrees in the order determined by the differential
INPUT:
- ``start`` -- (default: ``None``) a degree (element of the grading
group) or ``None``
- ``exclude_first`` -- boolean (optional; default:
``False``); whether to exclude the lowest degree -- this is a
handy way to just get the degrees of the non-zero modules,
as the domain of the first differential is zero.
OUTPUT:
If ``start`` has been specified, the longest tuple of degrees
* containing ``start`` (unless ``start`` would be the first
and ``exclude_first=True``),
* in ascending order relative to :meth:`degree_of_differential`, and
* such that none of the corresponding differentials are `0\times 0`.
If ``start`` has not been specified, a tuple of such tuples of
degrees. One for each sequence of non-zero differentials. They
are returned in sort order.
EXAMPLES::
sage: one = matrix(ZZ, [[1]])
sage: D = ChainComplex({0: one, 2: one, 6:one})
sage: ascii_art(D)
[1] [1] [0] [1]
0 <-- C_7 <---- C_6 <-- 0 ... 0 <-- C_3 <---- C_2 <---- C_1 <---- C_0 <-- 0
sage: D.ordered_degrees()
((-1, 0, 1, 2, 3), (5, 6, 7))
sage: D.ordered_degrees(exclude_first=True)
((0, 1, 2, 3), (6, 7))
sage: D.ordered_degrees(6)
(5, 6, 7)
sage: D.ordered_degrees(5, exclude_first=True)
(6, 7)
"""
if start is None:
result = []
degrees = set(self._diff)
while len(degrees) > 0:
ordered = self.ordered_degrees(degrees.pop())
degrees.difference_update(ordered)
if exclude_first:
ordered = tuple(ordered[1:])
result.append(ordered)
result.sort()
return tuple(result)
import collections
result = collections.deque()
result.append(start)
next_deg = start + self.degree_of_differential()
while next_deg in self._diff:
result.append(next_deg)
next_deg += self.degree_of_differential()
prev_deg = start - self.degree_of_differential()
while prev_deg in self._diff:
result.appendleft(prev_deg)
prev_deg -= self.degree_of_differential()
if exclude_first:
result.popleft()
return tuple(result)
def degree_of_differential(self):
"""
Return the degree of the differentials of the complex
OUTPUT:
An element of the grading group.
EXAMPLES::
sage: D = ChainComplex({0: matrix(ZZ, 2, 2, [1,0,0,2])})
sage: D.degree_of_differential()
1
"""
return self._degree_of_differential
def differential(self, dim=None):
"""
The differentials which make up the chain complex.
INPUT:
- ``dim`` -- element of the grading group (optional, default
``None``); if this is ``None``, return a dictionary of all
of the differentials, or if this is a single element, return
the differential starting in that dimension
OUTPUT:
Either a dictionary of all of the differentials or a single
differential (i.e., a matrix).
EXAMPLES::
sage: D = ChainComplex({0: matrix(ZZ, 2, 2, [1,0,0,2])})
sage: D.differential()
{-1: [], 0: [1 0]
[0 2], 1: []}
sage: D.differential(0)
[1 0]
[0 2]
sage: C = ChainComplex({0: identity_matrix(ZZ, 40)})
sage: C.differential()
{-1: 40 x 0 dense matrix over Integer Ring,
0: 40 x 40 dense matrix over Integer Ring,
1: []}
"""
if dim is None:
return copy(self._diff)
dim = self.grading_group()(dim)
try:
return self._diff[dim]
except KeyError:
pass
# all differentials that are not 0x0 are in self._diff
return matrix(self.base_ring(), 0, 0)
def dual(self):
"""
The dual chain complex to ``self``.
Since all modules in ``self`` are free of finite rank, the
dual in dimension `n` is isomorphic to the original chain
complex in dimension `n`, and the corresponding boundary
matrix is the transpose of the matrix in the original complex.
This converts a chain complex to a cochain complex and vice versa.
EXAMPLES::
sage: C = ChainComplex({2: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: C.degree_of_differential()
1
sage: C.differential(2)
[3 0 0]
[0 0 0]
sage: C.dual().degree_of_differential()
-1
sage: C.dual().differential(3)
[3 0]
[0 0]
[0 0]
"""
data = {}
deg = self.degree_of_differential()
for d in self.differential():
data[(d+deg)] = self.differential()[d].transpose()
return ChainComplex(data, degree=-deg)
def free_module_rank(self, degree):
r"""
Return the rank of the free module at the given ``degree``.
INPUT:
- ``degree`` -- an element of the grading group
OUTPUT:
Integer. The rank of the free module `C_n` at the given degree
`n`.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0]), 1: matrix(ZZ, [[0, 1]])})
sage: [C.free_module_rank(i) for i in range(-2, 5)]
[0, 0, 3, 2, 1, 0, 0]
"""
try:
return self._diff[degree].ncols()
except KeyError:
return ZZ.zero()
def free_module(self, degree=None):
r"""
Return the free module at fixed ``degree``, or their sum.
INPUT:
- ``degree`` -- an element of the grading group or ``None`` (default).
OUTPUT:
The free module `C_n` at the given degree `n`. If the degree
is not specified, the sum `\bigoplus C_n` is returned.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0]), 1: matrix(ZZ, [[0, 1]])})
sage: C.free_module()
Ambient free module of rank 6 over the principal ideal domain Integer Ring
sage: C.free_module(0)
Ambient free module of rank 3 over the principal ideal domain Integer Ring
sage: C.free_module(1)
Ambient free module of rank 2 over the principal ideal domain Integer Ring
sage: C.free_module(2)
Ambient free module of rank 1 over the principal ideal domain Integer Ring
"""
if degree is None:
rank = sum([mat.ncols() for mat in self.differential().values()])
else:
rank = self.free_module_rank(degree)
return FreeModule(self.base_ring(), rank)
def __eq__(self, other):
"""
Return ``True`` iff this chain complex is the same as other: that
is, if the base rings and the matrices of the two are the
same.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])}, base_ring=GF(2))
sage: D = ChainComplex({0: matrix(GF(2), 2, 3, [1, 0, 0, 0, 0, 0]), 1: matrix(ZZ, 0, 2), 3: matrix(ZZ, 0, 0)}) # base_ring determined from the matrices
sage: C == D
True
"""
if not isinstance(other, ChainComplex_class) or self.base_ring() != other.base_ring():
return False
R = self.base_ring()
equal = True
for d, mat in iteritems(self.differential()):
if d not in other.differential():
equal = equal and mat.ncols() == 0 and mat.nrows() == 0
else:
equal = (equal and
other.differential()[d].change_ring(R) == mat.change_ring(R))
for d, mat in iteritems(other.differential()):
if d not in self.differential():
equal = equal and mat.ncols() == 0 and mat.nrows() == 0
return equal
def __ne__(self, other):
"""
Return ``True`` iff this chain complex is not the same as other.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])}, base_ring=GF(2))
sage: D = ChainComplex({0: matrix(GF(2), 2, 3, [1, 0, 0, 0, 0, 0]), 1: matrix(ZZ, 0, 2), 3: matrix(ZZ, 0, 0)}) # base_ring determined from the matrices
sage: C != D
False
sage: E = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])}, base_ring=ZZ)
sage: C != E
True
"""
return not self == other
def _homology_chomp(self, deg, base_ring, verbose, generators):
"""
Helper function for :meth:`homology`.
INPUT:
- ``deg`` -- integer (one specific homology group) or ``None``
(all of those that can be non-zero)
- ``base_ring`` -- the base ring (must be the integers
or a prime field)
- ``verbose`` -- boolean, whether to print some messages
- ``generators`` -- boolean, whether to also return generators
for homology
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])}, base_ring=GF(2))
sage: C._homology_chomp(None, GF(2), False, False) # optional - CHomP
{0: Vector space of dimension 2 over Finite Field of size 2, 1: Vector space of dimension 1 over Finite Field of size 2}
sage: D = ChainComplex({0: matrix(ZZ,1,0,[]), 1: matrix(ZZ,1,1,[0]),
....: 2: matrix(ZZ,0,1,[])})
sage: D._homology_chomp(None, GF(2), False, False) # optional - CHomP
{1: Vector space of dimension 1 over Finite Field of size 2,
2: Vector space of dimension 1 over Finite Field of size 2}
"""
from sage.interfaces.chomp import homchain
H = homchain(self, base_ring=base_ring, verbose=verbose,
generators=generators)
if H is None:
raise RuntimeError('ran CHomP, but no output')
if deg is None:
# all the homology groups that could be non-zero
# one has to complete the answer of chomp
result = H
for idx in self.nonzero_degrees():
                if idx not in H:
result[idx] = HomologyGroup(0, base_ring)
return result
if deg in H:
return H[deg]
else:
return HomologyGroup(0, base_ring)
@rename_keyword(deprecation=15151, dim='deg')
def homology(self, deg=None, base_ring=None, generators=False,
verbose=False, algorithm='pari'):
r"""
The homology of the chain complex.
INPUT:
- ``deg`` -- an element of the grading group for the chain
complex (default: ``None``); the degree in which
to compute homology -- if this is ``None``, return the
homology in every degree in which the chain complex is
possibly nonzero.
- ``base_ring`` -- a commutative ring (optional, default is the
base ring for the chain complex); must be either the
integers `\ZZ` or a field
- ``generators`` -- boolean (optional, default ``False``); if
``True``, return generators for the homology groups along with
the groups. See :trac:`6100`
        - ``verbose`` -- boolean (optional, default ``False``); if
          ``True``, print some messages as the homology is computed
        - ``algorithm`` -- string (optional, default ``'pari'``); the
options are:
* ``'auto'``
* ``'chomp'``
* ``'dhsw'``
* ``'pari'``
* ``'no_chomp'``
see below for descriptions
OUTPUT:
If the degree is specified, the homology in degree ``deg``.
Otherwise, the homology in every dimension as a dictionary
indexed by dimension.
ALGORITHM:
If ``algorithm`` is set to ``'auto'``, then use
CHomP if available. CHomP is available at the web page
http://chomp.rutgers.edu/. It is also an optional package
for Sage. If ``algorithm`` is ``chomp``, always use chomp.
CHomP computes homology, not cohomology, and only works over
the integers or finite prime fields. Therefore if any of
these conditions fails, or if CHomP is not present, or if
``algorithm`` is set to 'no_chomp', go to plan B: if ``self``
has a ``_homology`` method -- each simplicial complex has
this, for example -- then call that. Such a method implements
specialized algorithms for the particular type of cell
complex.
Otherwise, move on to plan C: compute the chain complex of
``self`` and compute its homology groups. To do this: over a
field, just compute ranks and nullities, thus obtaining
dimensions of the homology groups as vector spaces. Over the
integers, compute Smith normal form of the boundary matrices
defining the chain complex according to the value of
``algorithm``. If ``algorithm`` is ``'auto'`` or ``'no_chomp'``,
then for each relatively small matrix, use the standard Sage
method, which calls the Pari package. For any large matrix,
reduce it using the Dumas, Heckenbach, Saunders, and Welker
elimination algorithm [DHSW2003]_: see
:func:`~sage.homology.matrix_utils.dhsw_snf` for details.
Finally, ``algorithm`` may also be ``'pari'`` or ``'dhsw'``, which
forces the named algorithm to be used regardless of the size
of the matrices and regardless of whether CHomP is available.
As of this writing, ``'pari'`` is the fastest standard option.
The optional CHomP package may be better still.
.. WARNING::
This only works if the base ring is the integers or a
field. Other values will return an error.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: C.homology()
{0: Z x Z, 1: Z x C3}
sage: C.homology(deg=1, base_ring = GF(3))
Vector space of dimension 2 over Finite Field of size 3
sage: D = ChainComplex({0: identity_matrix(ZZ, 4), 4: identity_matrix(ZZ, 30)})
sage: D.homology()
{0: 0, 1: 0, 4: 0, 5: 0}
Generators: generators are given as
a list of cycles, each of which is an element in the
appropriate free module, and hence is represented as a vector::
sage: C.homology(1, generators=True) # optional - CHomP
(Z x C3, [(0, 1), (1, 0)])
Tests for :trac:`6100`, the Klein bottle with generators::
sage: d0 = matrix(ZZ, 0,1)
sage: d1 = matrix(ZZ, 1,3, [[0,0,0]])
sage: d2 = matrix(ZZ, 3,2, [[1,1], [1,-1], [-1,1]])
sage: C_k = ChainComplex({0:d0, 1:d1, 2:d2}, degree=-1)
sage: C_k.homology(generators=true) # optional - CHomP
{0: (Z, [(1)]), 1: (Z x C2, [(0, 0, 1), (0, 1, -1)]), 2: 0}
From a torus using a field::
sage: T = simplicial_complexes.Torus()
sage: C_t = T.chain_complex()
sage: C_t.homology(base_ring=QQ, generators=True)
{0: [(Vector space of dimension 1 over Rational Field,
Chain(0:(0, 0, 0, 0, 0, 0, 1)))],
1: [(Vector space of dimension 1 over Rational Field,
Chain(1:(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 1))),
(Vector space of dimension 1 over Rational Field,
Chain(1:(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 1, 0, -1, 0)))],
2: [(Vector space of dimension 1 over Rational Field,
Chain(2:(1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1)))]}
"""
from sage.interfaces.chomp import have_chomp
if deg is not None and deg not in self.grading_group():
raise ValueError('degree is not an element of the grading group')
if base_ring is None:
base_ring = self.base_ring()
if not (base_ring.is_field() or base_ring is ZZ):
raise NotImplementedError('can only compute homology if the base ring is the integers or a field')
if algorithm not in ['dhsw', 'pari', 'auto', 'no_chomp', 'chomp']:
raise NotImplementedError('algorithm not recognized')
if algorithm == 'auto' \
and (base_ring == ZZ or (base_ring.is_prime_field() and base_ring != QQ)) \
and have_chomp('homchain'):
algorithm = 'chomp'
if algorithm == 'chomp':
return self._homology_chomp(deg, base_ring, verbose, generators)
if deg is None:
deg = self.nonzero_degrees()
if isinstance(deg, (list, tuple)):
answer = {}
for deg in self.nonzero_degrees():
answer[deg] = self._homology_in_degree(deg, base_ring, verbose, generators, algorithm)
return answer
else:
return self._homology_in_degree(deg, base_ring, verbose, generators, algorithm)
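    # A minimal sketch (not part of the class) of the field case described in
    # the ALGORITHM section above, assuming a Sage session: over a field,
    # dim H_n is the nullity of the outgoing differential minus the rank of
    # the incoming one.
    #
    #     d_out = matrix(QQ, 2, 3, [3, 0, 0, 0, 0, 0])  # leaves degree n
    #     d_in = matrix(QQ, 3, 0)                       # enters degree n
    #     dim_H = (d_out.ncols() - d_out.rank()) - d_in.rank()
    #     dim_H == 2   # True: two independent cycles, no boundaries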
def _homology_in_degree(self, deg, base_ring, verbose, generators, algorithm):
"""
Helper method for :meth:`homology`.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: C.homology(1) == C._homology_in_degree(1, ZZ, False, False, 'auto')
True
"""
if deg not in self.nonzero_degrees():
zero_homology = HomologyGroup(0, base_ring)
if generators:
return (zero_homology, vector(base_ring, []))
else:
return zero_homology
if verbose:
print('Computing homology of the chain complex in dimension %s...' % deg)
fraction_field = base_ring.fraction_field()
def change_ring(X):
if X.base_ring() is base_ring:
return X
return X.change_ring(base_ring)
# d_out is the differential going out of degree deg,
# d_in is the differential entering degree deg
differential = self.degree_of_differential()
d_in = change_ring(self.differential(deg - differential))
d_out = change_ring(self.differential(deg))
d_out_rank = self.rank(deg, ring=fraction_field)
d_out_nullity = d_out.ncols() - d_out_rank
if d_in.is_zero():
if generators: #Include the generators of the nullspace
return [(HomologyGroup(1, base_ring), self({deg:gen}))
for gen in d_out.right_kernel().basis()]
else:
return HomologyGroup(d_out_nullity, base_ring)
if generators:
orders, gens = self._homology_generators_snf(d_in, d_out, d_out_rank)
answer = [(HomologyGroup(1, base_ring, [order]), self({deg:gen}))
for order, gen in zip(orders, gens)]
else:
if base_ring.is_field():
d_in_rank = self.rank(deg-differential, ring=base_ring)
answer = HomologyGroup(d_out_nullity - d_in_rank, base_ring)
elif base_ring == ZZ:
if d_in.ncols() == 0:
all_divs = [0] * d_out_nullity
else:
if algorithm in ['auto', 'no_chomp']:
if ((d_in.ncols() > 300 and d_in.nrows() > 300)
or (min(d_in.ncols(), d_in.nrows()) > 100 and
d_in.ncols() + d_in.nrows() > 600)):
algorithm = 'dhsw'
else:
algorithm = 'pari'
if algorithm == 'dhsw':
from sage.homology.matrix_utils import dhsw_snf
all_divs = dhsw_snf(d_in, verbose=verbose)
elif algorithm == 'pari':
all_divs = d_in.elementary_divisors(algorithm)
else:
raise ValueError('unsupported algorithm')
all_divs = all_divs[:d_out_nullity]
# divisors equal to 1 produce trivial
# summands, so filter them out
divisors = [x for x in all_divs if x != 1]
answer = HomologyGroup(len(divisors), base_ring, divisors)
else:
raise NotImplementedError('only base rings ZZ and fields are supported')
return answer
def _homology_generators_snf(self, d_in, d_out, d_out_rank):
"""
Compute the homology generators using the Smith normal form.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: C.homology(1)
Z x C3
sage: C._homology_generators_snf(C.differential(0), C.differential(1), 0)
([3, 0], [(1, 0), (0, 1)])
"""
# Find the kernel of the out-going differential.
K = d_out.right_kernel().matrix().transpose().change_ring(d_out.base_ring())
# Compute the induced map to the kernel
S = K.augment(d_in).hermite_form()
d_in_induced = S.submatrix(row=0, nrows=d_in.nrows()-d_out_rank,
col=d_in.nrows()-d_out_rank, ncols=d_in.ncols())
# Find the SNF of the induced matrix and appropriate generators
(N, P, Q) = d_in_induced.smith_form()
all_divs = [0]*N.nrows()
non_triv = 0
for i in range(0, N.nrows()):
if i >= N.ncols():
break
all_divs[i] = N[i][i]
if N[i][i] == 1:
non_triv = non_triv + 1
divisors = [x for x in all_divs if x != 1]
gens = (K * P.inverse().submatrix(col=non_triv)).columns()
return divisors, gens
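    # Sketch of the integer case (assumes a Sage session): over ZZ the
    # torsion coefficients are the non-unit elementary divisors of the
    # incoming differential, read off from its Smith normal form as in
    # ``_homology_in_degree`` above.
    #
    #     A = matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])
    #     D, U, V = A.smith_form()   # D diagonal with U * A * V == D
    #     # the nonzero, non-unit diagonal entries of D (here a single 3)
    #     # are the torsion coefficients, matching the C3 in the doctests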
def betti(self, deg=None, base_ring=None):
"""
        The Betti number of the chain complex.
That is, write the homology in this degree as a direct sum
of a free module and a torsion module; the Betti number is the
rank of the free summand.
INPUT:
- ``deg`` -- an element of the grading group for the chain
complex or None (default ``None``); if ``None``,
then return every Betti number, as a dictionary indexed by
degree, or if an element of the grading group, then return
the Betti number in that degree
- ``base_ring`` -- a commutative ring (optional, default is the
base ring for the chain complex); compute homology with
these coefficients -- must be either the integers or a
field
OUTPUT:
The Betti number in degree ``deg`` -- the rank of the free
part of the homology module in this degree.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: C.betti(0)
2
sage: [C.betti(n) for n in range(5)]
[2, 1, 0, 0, 0]
sage: C.betti()
{0: 2, 1: 1}
sage: D = ChainComplex({0:matrix(GF(5), [[3, 1],[1, 2]])})
sage: D.betti()
{0: 1, 1: 1}
"""
if base_ring is None:
base_ring = self.base_ring()
try:
base_ring = base_ring.fraction_field()
except AttributeError:
raise NotImplementedError('only implemented if the base ring is ZZ or a field')
H = self.homology(deg, base_ring=base_ring)
if isinstance(H, dict):
return {deg: homology_group.dimension()
for deg, homology_group in iteritems(H)}
else:
return H.dimension()
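    # Sketch (assumes a Sage session): passing to the fraction field kills
    # torsion, so the dimension that ``betti`` reports is the free rank.
    #
    #     C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
    #     C.homology(1)                               # Z x C3
    #     C.homology(1, base_ring=QQ).dimension()     # 1 -- the C3 is gone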
def torsion_list(self, max_prime, min_prime=2):
r"""
Look for torsion in this chain complex by computing its mod `p`
homology for a range of primes `p`.
INPUT:
- ``max_prime`` -- prime number; search for torsion mod `p` for
all `p` strictly less than this number
- ``min_prime`` -- prime (optional, default 2); search for
torsion mod `p` for primes at least as big as this
Return a list of pairs `(p, d)` where `p` is a prime at which
there is torsion and `d` is a list of dimensions in which this
torsion occurs.
The base ring for the chain complex must be the integers; if
not, an error is raised.
ALGORITHM:
let `C` denote the chain complex. Let `P` equal
``max_prime``. Compute the mod `P` homology of `C`, and use
this as the base-line computation: the assumption is that this
is isomorphic to the integral homology tensored with
`\GF{P}`. Then compute the mod `p` homology for a range of
primes `p`, and record whenever the answer differs from the
base-line answer.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: C.homology()
{0: Z x Z, 1: Z x C3}
sage: C.torsion_list(11)
[(3, [1])]
sage: C = ChainComplex([matrix(ZZ, 1, 1, [2]), matrix(ZZ, 1, 1), matrix(1, 1, [3])])
sage: C.homology(1)
C2
sage: C.homology(3)
C3
sage: C.torsion_list(5)
[(2, [1]), (3, [3])]
"""
if self.base_ring() != ZZ:
raise NotImplementedError('only implemented for base ring the integers')
answer = []
torsion_free = self.betti(base_ring=GF(max_prime))
for p in prime_range(min_prime, max_prime):
mod_p_betti = self.betti(base_ring=GF(p))
if mod_p_betti != torsion_free:
diff_dict = {}
temp_diff = {}
D = self.degree_of_differential()
for i in torsion_free:
temp_diff[i] = mod_p_betti.get(i, 0) - torsion_free[i]
for i in temp_diff:
if temp_diff[i] > 0:
if i+D in diff_dict:
lower = diff_dict[i+D]
else:
lower = 0
current = temp_diff[i]
if current > lower:
diff_dict[i] = current - lower
if i-D in diff_dict:
diff_dict[i-D] -= current - lower
differences = []
for i in diff_dict:
if diff_dict[i] != 0:
differences.append(i)
answer.append((p,differences))
return answer
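    # Sketch (assumes a Sage session) of the detection principle above: the
    # mod-p Betti numbers jump, relative to the ``max_prime`` baseline,
    # exactly at primes dividing the torsion.
    #
    #     C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
    #     C.betti(base_ring=GF(2))   # {0: 2, 1: 1} -- agrees with the baseline
    #     C.betti(base_ring=GF(3))   # {0: 3, 1: 2} -- the C3 inflates both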
def _Hom_(self, other, category=None):
"""
Return the set of chain maps between chain complexes ``self``
and ``other``.
EXAMPLES::
sage: S = simplicial_complexes.Sphere(2)
sage: T = simplicial_complexes.Torus()
sage: C = S.chain_complex(augmented=True,cochain=True)
sage: D = T.chain_complex(augmented=True,cochain=True)
sage: Hom(C,D) # indirect doctest
Set of Morphisms from Chain complex with at most 4 nonzero terms over
Integer Ring to Chain complex with at most 4 nonzero terms over Integer
Ring in Category of chain complexes over Integer Ring
"""
from sage.homology.chain_complex_homspace import ChainComplexHomspace
return ChainComplexHomspace(self, other)
def _flip_(self):
"""
Flip chain complex upside down (degree `n` gets changed to
degree `-n`), thus turning a chain complex into a cochain complex
without changing the homology (except for flipping it, too).
EXAMPLES::
sage: C = ChainComplex({2: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: C.degree_of_differential()
1
sage: C.differential(2)
[3 0 0]
[0 0 0]
sage: C._flip_().degree_of_differential()
-1
sage: C._flip_().differential(-2)
[3 0 0]
[0 0 0]
"""
data = {}
deg = self.degree_of_differential()
for d in self.differential():
data[-d] = self.differential()[d]
return ChainComplex(data, degree=-deg)
def shift(self, n=1):
"""
Shift this chain complex `n` times.
INPUT:
- ``n`` -- an integer (optional, default 1)
The *shift* operation is also sometimes called *translation* or
*suspension*.
To shift a chain complex by `n`, shift its entries up by `n`
(if it is a chain complex) or down by `n` (if it is a cochain
complex); that is, shifting by 1 always shifts in the opposite
direction of the differential. In symbols, if `C` is a chain
complex and `C[n]` is its `n`-th shift, then `C[n]_j =
C_{j-n}`. The differential in the shift `C[n]` is obtained by
multiplying each differential in `C` by `(-1)^n`.
Caveat: different sources use different conventions for
shifting: what we call `C[n]` might be called `C[-n]` in some
        places. See, for example,
https://ncatlab.org/nlab/show/suspension+of+a+chain+complex
(which uses `C[n]` as we do but acknowledges `C[-n]`) or 1.2.8
in [Wei1994]_ (which uses `C[-n]`).
EXAMPLES::
sage: S1 = simplicial_complexes.Sphere(1).chain_complex()
sage: S1.shift(1).differential(2) == -S1.differential(1)
True
sage: S1.shift(2).differential(3) == S1.differential(1)
True
sage: S1.shift(3).homology(4)
Z
For cochain complexes, shifting goes in the other
direction. Topologically, this makes sense if we grade the
cochain complex for a space negatively::
sage: T = simplicial_complexes.Torus()
sage: co_T = T.chain_complex()._flip_()
sage: co_T.homology()
{-2: Z, -1: Z x Z, 0: Z}
sage: co_T.degree_of_differential()
1
sage: co_T.shift(2).homology()
{-4: Z, -3: Z x Z, -2: Z}
You can achieve the same result by tensoring (on the left, to
get the signs right) with a rank one free module in degree
``-n * deg``, if ``deg`` is the degree of the differential::
sage: C = ChainComplex({-2: matrix(ZZ, 0, 1)})
sage: C.tensor(co_T).homology()
{-4: Z, -3: Z x Z, -2: Z}
"""
deg = self.degree_of_differential()
shift = n * deg
sgn = (-1)**n
return ChainComplex({k-shift: sgn * self._diff[k] for k in self._diff},
degree_of_differential=deg)
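    # Sketch of the arithmetic in the dictionary comprehension above: entry
    # k - n*deg of the shift comes from entry k of the original, and every
    # differential is scaled by (-1)^n.  From the doctest (a Sage session):
    #
    #     S1 = simplicial_complexes.Sphere(1).chain_complex()
    #     S1.shift(1).differential(2) == -S1.differential(1)   # True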
def _chomp_repr_(self):
r"""
String representation of ``self`` suitable for use by the CHomP
program.
Since CHomP can only handle chain complexes, not cochain
complexes, and since it likes its complexes to start in degree
0, flip the complex over if necessary, and shift it to start
in degree 0. Note also that CHomP only works over the
integers or a finite prime field.
EXAMPLES::
sage: C = ChainComplex({-2: matrix(ZZ, 1, 3, [3, 0, 0])}, degree=-1)
sage: C._chomp_repr_()
'chain complex\n\nmax dimension = 1\n\ndimension 0\n boundary a1 = 0\n\ndimension 1\n boundary a1 = + 3 * a1 \n boundary a2 = 0\n boundary a3 = 0\n\n'
sage: C = ChainComplex({-2: matrix(ZZ, 1, 3, [3, 0, 0])}, degree=1)
sage: C._chomp_repr_()
'chain complex\n\nmax dimension = 1\n\ndimension 0\n boundary a1 = 0\n\ndimension 1\n boundary a1 = + 3 * a1 \n boundary a2 = 0\n boundary a3 = 0\n\n'
"""
deg = self.degree_of_differential()
if (self.grading_group() != ZZ or
(deg != 1 and deg != -1)):
raise ValueError('CHomP only works on Z-graded chain complexes with '
'differential of degree 1 or -1')
base_ring = self.base_ring()
if (base_ring == QQ) or (base_ring != ZZ and not (base_ring.is_prime_field())):
raise ValueError('CHomP doesn\'t compute over the rationals, only over Z or F_p')
if deg == -1:
diffs = self.differential()
else:
diffs = self._flip_().differential()
if len(diffs) == 0:
diffs = {0: matrix(ZZ, 0,0)}
maxdim = max(diffs)
mindim = min(diffs)
# will shift chain complex by subtracting mindim from
# dimensions, so its bottom dimension is zero.
s = "chain complex\n\nmax dimension = %s\n\n" % (maxdim - mindim - 1,)
for i in range(0, maxdim - mindim):
s += "dimension %s\n" % i
mat = diffs.get(i + mindim, matrix(base_ring, 0, 0))
for idx in range(mat.ncols()):
s += " boundary a%s = " % (idx + 1)
# construct list of bdries
col = mat.column(idx)
nonzero_pos = col.nonzero_positions()
if nonzero_pos:
for j in nonzero_pos:
entry = col[j]
if entry > 0:
sgn = "+"
else:
sgn = "-"
entry = -entry
s += "%s %s * a%s " % (sgn, entry, j+1)
else:
s += "0"
s += "\n"
s += "\n"
return s
def _repr_(self):
"""
Print representation.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: C
Chain complex with at most 2 nonzero terms over Integer Ring
"""
diffs = [mat for mat in self._diff.values() if mat.nrows() + mat.ncols() > 0]
if len(diffs) == 0:
s = 'Trivial chain complex'
else:
s = 'Chain complex with at most {0} nonzero terms'.format(len(diffs)-1)
s += ' over {0}'.format(self.base_ring())
return s
def _ascii_art_(self):
"""
Return an ascii art representation.
Note that arrows go to the left so that composition of
differentials is the usual matrix multiplication.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0]), 1:zero_matrix(1,2)})
sage: ascii_art(C)
[3 0 0]
[0 0] [0 0 0]
0 <-- C_2 <------ C_1 <-------- C_0 <-- 0
sage: one = matrix(ZZ, [[1]])
sage: D = ChainComplex({0: one, 2: one, 6:one})
sage: ascii_art(D)
[1] [1] [0] [1]
0 <-- C_7 <---- C_6 <-- 0 ... 0 <-- C_3 <---- C_2 <---- C_1 <---- C_0 <-- 0
"""
from sage.typeset.ascii_art import AsciiArt
def arrow_art(n):
d_n = self.differential(n)
if d_n.nrows() == 0 or d_n.ncols() == 0:
return AsciiArt(['<--'])
d_str = [' '+line+' ' for line in str(d_n).splitlines()]
arrow = '<' + '-'*(len(d_str[0])-1)
d_str.append(arrow)
return AsciiArt(d_str)
def module_art(n):
C_n = self.free_module(n)
if C_n.rank() == 0:
return AsciiArt([' 0 '])
else:
return AsciiArt([' C_{0} '.format(n)])
result = []
for ordered in self.ordered_degrees():
ordered = list(reversed(ordered))
if len(ordered) == 0:
return AsciiArt(['0'])
result_ordered = module_art(ordered[0] + self.degree_of_differential())
for n in ordered:
result_ordered += arrow_art(n) + module_art(n)
result = [result_ordered] + result
concatenated = result[0]
for r in result[1:]:
concatenated += AsciiArt([' ... ']) + r
return concatenated
def _unicode_art_(self):
"""
Return a unicode art representation.
Note that arrows go to the left so that composition of
differentials is the usual matrix multiplication.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0]), 1:zero_matrix(1,2)})
sage: unicode_art(C)
⎛3 0 0⎞
(0 0) ⎝0 0 0⎠
0 ⟵── C_2 ⟵──── C_1 ⟵────── C_0 ⟵── 0
sage: one = matrix(ZZ, [[1]])
sage: D = ChainComplex({0: one, 2: one, 6:one})
sage: unicode_art(D)
(1) (1) (0) (1)
0 ⟵── C_7 ⟵── C_6 ⟵── 0 ... 0 ⟵── C_3 ⟵── C_2 ⟵── C_1 ⟵── C_0 ⟵── 0
"""
from sage.typeset.unicode_art import UnicodeArt
def arrow_art(n):
d_n = self.differential(n)
if not d_n.nrows() or not d_n.ncols():
return UnicodeArt([u'⟵──'])
d_str = list(d_n._unicode_art_())
arrow = u'⟵' + u'─' * (len(d_str[0]) - 1)
d_str.append(arrow)
return UnicodeArt(d_str)
def module_art(n):
C_n = self.free_module(n)
if not C_n.rank():
return UnicodeArt([u' 0 '])
else:
return UnicodeArt([u' C_{0} '.format(n)])
result = []
for ordered in self.ordered_degrees():
ordered = list(reversed(ordered))
if not ordered:
return UnicodeArt([u'0'])
result_ordered = module_art(ordered[0] + self.degree_of_differential())
for n in ordered:
result_ordered += arrow_art(n) + module_art(n)
result = [result_ordered] + result
concatenated = result[0]
for r in result[1:]:
concatenated += UnicodeArt([u' ... ']) + r
return concatenated
def _latex_(self):
"""
LaTeX print representation.
EXAMPLES::
sage: C = ChainComplex({0: matrix(ZZ, 2, 3, [3, 0, 0, 0, 0, 0])})
sage: C._latex_()
'\\Bold{Z}^{3} \\xrightarrow{d_{0}} \\Bold{Z}^{2}'
sage: ChainComplex()._latex_()
'0'
sage: G = AdditiveAbelianGroup([0, 0])
sage: m = matrix([0])
sage: C = ChainComplex(grading_group=G, degree=G(vector([1,2])), data={G.zero(): m})
sage: C._latex_()
'\\dots \\xrightarrow{d_{\\text{\\texttt{(0,{ }0)}}}} \\Bold{Z}^{1} \\xrightarrow{d_{\\text{\\texttt{(1,{ }2)}}}} \\dots'
"""
# Warning: this is likely to screw up if, for example, the
# degree of the differential is 2 and there are nonzero terms
# in consecutive dimensions (e.g., in dimensions 0 and 1). In
# such cases, the representation might show a differential
# connecting these terms, although the differential goes from
# dimension 0 to dimension 2, and from dimension 1 to
# dimension 3, etc. I don't know how much effort should be
# put into trying to fix this.
string = ""
diffs = self._diff
if len(diffs) == 0:
return "0"
deg = self.degree_of_differential()
ring = self.base_ring()
if self.grading_group() != ZZ:
guess = next(iter(diffs))
if guess - deg in diffs:
string += "\\dots \\xrightarrow{d_{%s}} " % latex(guess-deg)
string += _latex_module(ring, diffs[guess].ncols())
string += " \\xrightarrow{d_{%s}} \\dots" % latex(guess)
else:
backwards = (deg < 0)
sorted_list = sorted(diffs.keys(), reverse=backwards)
if len(diffs) <= 6:
for n in sorted_list[1:-1]:
mat = diffs[n]
string += _latex_module(ring, mat.ncols())
string += " \\xrightarrow{d_{%s}} " % latex(n)
mat = diffs[sorted_list[-1]]
string += _latex_module(ring, mat.ncols())
else:
for n in sorted_list[:2]:
mat = diffs[n]
string += _latex_module(ring, mat.ncols())
string += " \\xrightarrow{d_{%s}} " % latex(n)
string += "\\dots "
n = sorted_list[-2]
string += "\\xrightarrow{d_{%s}} " % latex(n)
mat = diffs[sorted_list[-1]]
string += _latex_module(ring, mat.ncols())
return string
def cartesian_product(self, *factors, **kwds):
r"""
        Return the direct sum (Cartesian product) of ``self`` with the given chain complexes.
Let `C` and `D` be two chain complexes with differentials
`\partial_C` and `\partial_D`, respectively, of the same degree (so
they must also have the same grading group).
The direct sum `S = C \oplus D` is a chain complex given by
`S_i = C_i \oplus D_i` with differential
`\partial = \partial_C \oplus \partial_D`.
INPUT:
        - ``subdivide`` -- (default: ``False``) whether to subdivide
          the differential matrices
EXAMPLES::
sage: R.<x,y> = QQ[]
sage: C = ChainComplex([matrix([[-y],[x]]), matrix([[x, y]])])
sage: D = ChainComplex([matrix([[x-y]]), matrix([[0], [0]])])
sage: ascii_art(C.cartesian_product(D))
[x y 0] [ -y 0]
[0 0 0] [ x 0]
[0 0 0] [ 0 x - y]
0 <-- C_2 <-------- C_1 <-------------- C_0 <-- 0
sage: D = ChainComplex({1:matrix([[x-y]]), 4:matrix([[x], [y]])})
sage: ascii_art(D)
[x]
[y] [x - y]
0 <-- C_5 <---- C_4 <-- 0 <-- C_2 <-------- C_1 <-- 0
sage: ascii_art(cartesian_product([C, D]))
[-y]
[x] [ x y 0] [ x]
[y] [ 0 0 x - y] [ 0]
0 <-- C_5 <---- C_4 <-- 0 <-- C_2 <-------------------- C_1 <----- C_0 <-- 0
The degrees of the differentials must agree::
sage: C = ChainComplex({1:matrix([[x]])}, degree_of_differential=-1)
sage: D = ChainComplex({1:matrix([[x]])}, degree_of_differential=1)
sage: C.cartesian_product(D)
Traceback (most recent call last):
...
ValueError: the degrees of the differentials must match
TESTS::
sage: C = ChainComplex({2:matrix([[-1],[2]]), 1:matrix([[2, 1]])},
....: degree_of_differential=-1)
sage: ascii_art(C.cartesian_product(C, subdivide=True))
[-1| 0]
[ 2| 0]
[2 1|0 0] [--+--]
[---+---] [ 0|-1]
[0 0|2 1] [ 0| 2]
0 <-- C_0 <---------- C_1 <-------- C_2 <-- 0
::
sage: R.<x,y,z> = QQ[]
sage: C1 = ChainComplex({1:matrix([[x]])})
sage: C2 = ChainComplex({1:matrix([[y]])})
sage: C3 = ChainComplex({1:matrix([[z]])})
sage: ascii_art(cartesian_product([C1, C2, C3]))
[x 0 0]
[0 y 0]
[0 0 z]
0 <-- C_2 <-------- C_1 <-- 0
sage: ascii_art(C1.cartesian_product([C2, C3], subdivide=True))
[x|0|0]
[-+-+-]
[0|y|0]
[-+-+-]
[0|0|z]
0 <-- C_2 <-------- C_1 <-- 0
::
sage: R.<x> = ZZ[]
sage: G = AdditiveAbelianGroup([0,7])
sage: d = {G(vector([1,1])):matrix([[x]])}
sage: C = ChainComplex(d, grading_group=G, degree=G(vector([2,1])))
sage: ascii_art(C.cartesian_product(C))
[x 0]
[0 x]
0 <-- C_(3, 2) <------ C_(1, 1) <-- 0
"""
if not factors:
return self
if isinstance(factors[0], (list, tuple)):
factors = factors[0]
deg_diff = self.degree_of_differential()
if any(D.degree_of_differential() != deg_diff for D in factors):
raise ValueError("the degrees of the differentials must match")
if any(D.grading_group() != self._grading_group for D in factors):
raise ValueError("the grading groups must match")
factors = [self] + list(factors)
R = self.base_ring()
zero = matrix(R, [])
subdivide = kwds.get('subdivide', False)
ret = self
diffs = [D.differential() for D in factors]
keys = reduce(lambda X,d: X.union(d.keys()), diffs, set())
ret = {k: matrix.block_diagonal([d.get(k, zero) for d in diffs],
subdivide=subdivide)
for k in keys}
return ChainComplex(ret, degree_of_differential=deg_diff,
grading_group=self._grading_group)
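    # Sketch (assumes a Sage session) of the block-diagonal assembly in the
    # ``ret`` comprehension above: each degree of the direct sum simply
    # stacks the factors' differentials.
    #
    #     dC = matrix(QQ, [[1]]); dD = matrix(QQ, [[2]])
    #     matrix.block_diagonal([dC, dD])
    #     # [1 0]
    #     # [0 2]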
def tensor(self, *factors, **kwds):
r"""
        Return the tensor product of ``self`` with the given chain complexes.
Let `C` and `D` be two chain complexes with differentials
`\partial_C` and `\partial_D`, respectively, of the same degree (so
they must also have the same grading group).
The tensor product `S = C \otimes D` is a chain complex given by
.. MATH::
S_i = \bigoplus_{a+b=i} C_a \otimes D_b
with differential
.. MATH::
\partial(x \otimes y) = \partial_C x \otimes y
+ (-1)^{|a| \cdot |\partial_D|} x \otimes \partial_D y
for `x \in C_a` and `y \in D_b`, where `|a|` is the degree of `a` and
`|\partial_D|` is the degree of `\partial_D`.
.. WARNING::
If the degree of the differential is even, then this may not
result in a valid chain complex.
INPUT:
        - ``subdivide`` -- (default: ``False``) whether to subdivide
          the differential matrices
.. TODO::
Make subdivision work correctly on multiple factors.
EXAMPLES::
sage: R.<x,y,z> = QQ[]
sage: C1 = ChainComplex({1:matrix([[x]])}, degree_of_differential=-1)
sage: C2 = ChainComplex({1:matrix([[y]])}, degree_of_differential=-1)
sage: C3 = ChainComplex({1:matrix([[z]])}, degree_of_differential=-1)
sage: ascii_art(C1.tensor(C2))
[ x]
[y x] [-y]
0 <-- C_0 <------ C_1 <----- C_2 <-- 0
sage: ascii_art(C1.tensor(C2).tensor(C3))
[ y x 0] [ x]
[-z 0 x] [-y]
[z y x] [ 0 -z -y] [ z]
0 <-- C_0 <-------- C_1 <----------- C_2 <----- C_3 <-- 0
::
sage: C = ChainComplex({2:matrix([[-y],[x]]), 1:matrix([[x, y]])},
....: degree_of_differential=-1); ascii_art(C)
[-y]
[x y] [ x]
0 <-- C_0 <------ C_1 <----- C_2 <-- 0
sage: T = C.tensor(C)
sage: T.differential(1)
[x y x y]
sage: T.differential(2)
[-y x 0 y 0 0]
[ x 0 x 0 y 0]
[ 0 -x -y 0 0 -y]
[ 0 0 0 -x -y x]
sage: T.differential(3)
[ x y 0 0]
[ y 0 -y 0]
[-x 0 0 -y]
[ 0 y x 0]
[ 0 -x 0 x]
[ 0 0 x y]
sage: T.differential(4)
[-y]
[ x]
[-y]
[ x]
The degrees of the differentials must agree::
sage: C1p = ChainComplex({1:matrix([[x]])}, degree_of_differential=1)
sage: C1.tensor(C1p)
Traceback (most recent call last):
...
ValueError: the degrees of the differentials must match
TESTS::
sage: R.<x,y,z> = QQ[]
sage: C1 = ChainComplex({1:matrix([[x]])})
sage: C2 = ChainComplex({1:matrix([[y]])})
sage: C3 = ChainComplex({1:matrix([[z]])})
sage: ascii_art(tensor([C1, C2, C3]))
[-y -z 0] [ z]
[ x 0 -z] [-y]
[x y z] [ 0 x y] [ x]
0 <-- C_6 <-------- C_5 <----------- C_4 <----- C_3 <-- 0
::
sage: R.<x,y> = ZZ[]
sage: G = AdditiveAbelianGroup([0,7])
sage: d1 = {G(vector([1,1])):matrix([[x]])}
sage: C1 = ChainComplex(d1, grading_group=G, degree=G(vector([2,1])))
sage: d2 = {G(vector([3,0])):matrix([[y]])}
sage: C2 = ChainComplex(d2, grading_group=G, degree=G(vector([2,1])))
sage: ascii_art(C1.tensor(C2))
[y]
[ x -y] [x]
0 <-- C_(8, 3) <-------- C_(6, 2) <---- C_(4, 1) <-- 0
Check that :trac:`21760` is fixed::
sage: C = ChainComplex({0: matrix(ZZ, 0, 2)}, degree=-1)
sage: ascii_art(C)
0 <-- C_0 <-- 0
sage: T = C.tensor(C)
sage: ascii_art(T)
0 <-- C_0 <-- 0
sage: T.free_module_rank(0)
4
"""
if not factors:
return self
if isinstance(factors[0], (list, tuple)):
factors = factors[0]
deg_diff = self.degree_of_differential()
if any(D.degree_of_differential() != deg_diff for D in factors):
raise ValueError("the degrees of the differentials must match")
if any(D.grading_group() != self._grading_group for D in factors):
raise ValueError("the grading groups must match")
R = self.base_ring()
zero = R.zero()
subdivide = kwds.get('subdivide', False)
ret = self
if self._grading_group is ZZ:
scalar = lambda a: (-1)**(a * deg_diff)
else:
scalar = lambda a: (-1)**(sum(a) * sum(deg_diff))
for D in factors:
# Setup
d = ret.differential()
dD = D.differential()
deg = sorted((k, ret.free_module_rank(k)) for k in d
if ret.free_module_rank(k) > 0)
degD = sorted((k, D.free_module_rank(k)) for k in dD
if D.free_module_rank(k) > 0)
diff = {}
# Our choice for tensor products will be x # y = x1 * y + x2 * y + ...
# Generate the data for the differential
for a,r in deg:
for b,s in degD:
rp = d[a].nrows()
sp = dD[b].nrows()
if a+b not in diff:
diff[a+b] = {}
mor = diff[a+b]
cur = {}
cur[(a+deg_diff,b)] = []
cur[(a,b+deg_diff)] = []
for i in range(r):
for j in range(s):
# \partial x_i \otimes y_j
vec = [zero]*(rp*s)
for k,val in enumerate(d[a].column(i)):
vec[s*k+j] += val
cur[(a+deg_diff,b)].append(vec)
# (-1)^a x_i \otimes \partial y_j
vec = [zero]*(r*sp)
for k,val in enumerate(dD[b].column(j)):
vec[sp*i+k] += scalar(a) * val
cur[(a,b+deg_diff)].append(vec)
mor[a,b] = cur
# Parse the data into matrices
to_delete = []
for k in diff:
# Get the data and interchange the indices
mor = diff[k]
row_keys = sorted(mor.keys())
cols = {}
col_widths = {}
for dom in mor:
c = mor[dom]
for im in c:
if im not in cols:
cols[im] = {}
col_widths[im] = len(c[im])
cols[im][dom] = c[im]
col_keys = sorted(cols.keys())
# Now build the matrix
M = []
for ck in col_keys:
M.append([])
col = cols[ck]
for rk in row_keys:
if rk in col:
M[-1].append(matrix(R, col[rk]).transpose())
else:
M[-1].append(zero)
diff[k] = matrix.block(M, subdivide=subdivide)
# Flag for removal any 0x0 matrices
if diff[k].nrows() == 0 and diff[k].ncols() == 0:
to_delete.append(k)
# Delete the 0x0 matrices
for k in to_delete:
del diff[k]
ret = ChainComplex(diff, degree_of_differential=deg_diff,
grading_group=self._grading_group)
return ret
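    # Sketch of the Koszul sign applied by the ``scalar`` lambda above in the
    # ZZ-graded case: the x-tensor-dy term is scaled by (-1)^(a * deg_diff)
    # for x in degree a.  In plain Python, with deg_diff = 1:
    #
    #     deg_diff = 1
    #     [(-1)**(a * deg_diff) for a in range(4)]   # [1, -1, 1, -1]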
from sage.misc.persist import register_unpickle_override
register_unpickle_override('sage.homology.chain_complex', 'ChainComplex', ChainComplex_class)
| python | 84,437 |
# -*- coding: utf-8 -*-
__version__ = '{{cookiecutter.version}}'
| python | 66 |
# -*- coding: utf-8 -*-
#
# SQLAlchemy documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 26 19:50:10 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../lib'))
sys.path.insert(0, os.path.abspath('../..')) # examples
sys.path.insert(0, os.path.abspath('.'))
import sqlalchemy
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'zzzeeksphinx',
'changelog',
'sphinx_paramlinks',
#'corrections'
]
# Add any paths that contain templates here, relative to this directory.
# not sure why abspath() is needed here, some users
# have reported this.
templates_path = [os.path.abspath('templates')]
nitpicky = True
# The suffix of source filenames.
source_suffix = '.rst'
# section names used by the changelog extension.
changelog_sections = ["general", "orm", "orm declarative", "orm querying", \
"orm configuration", "engine", "sql", \
"schema", \
"postgresql", "mysql", "sqlite", "mssql", \
"oracle", "firebird"]
# tags to sort on inside of sections
changelog_inner_tag_sort = ["feature", "changed", "removed", "bug", "moved"]
# how to render changelog links
changelog_render_ticket = "http://www.sqlalchemy.org/trac/ticket/%s"
changelog_render_pullreq = {
"bitbucket": "https://bitbucket.org/zzzeek/sqlalchemy/pull-request/%s",
"default": "https://bitbucket.org/zzzeek/sqlalchemy/pull-request/%s",
"github": "https://github.com/zzzeek/sqlalchemy/pull/%s",
}
changelog_render_changeset = "http://www.sqlalchemy.org/trac/changeset/%s"
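# Illustration only (the substitution is performed by the ``changelog``
# extension, so this is an assumption about its mechanism): the templates
# above are plain %-format strings, e.g. with a hypothetical ticket number
#
#     changelog_render_ticket % "3796"
#     # -> 'http://www.sqlalchemy.org/trac/ticket/3796'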
autodocmods_convert_modname = {
"sqlalchemy.sql.sqltypes": "sqlalchemy.types",
"sqlalchemy.sql.type_api": "sqlalchemy.types",
"sqlalchemy.sql.schema": "sqlalchemy.schema",
"sqlalchemy.sql.elements": "sqlalchemy.sql.expression",
"sqlalchemy.sql.selectable": "sqlalchemy.sql.expression",
"sqlalchemy.sql.dml": "sqlalchemy.sql.expression",
"sqlalchemy.sql.ddl": "sqlalchemy.schema",
"sqlalchemy.sql.base": "sqlalchemy.sql.expression",
"sqlalchemy.engine.base": "sqlalchemy.engine",
"sqlalchemy.engine.result": "sqlalchemy.engine",
}
autodocmods_convert_modname_w_class = {
("sqlalchemy.engine.interfaces", "Connectable"): "sqlalchemy.engine",
("sqlalchemy.sql.base", "DialectKWArgs"): "sqlalchemy.sql.base",
}
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'SQLAlchemy'
copyright = u'2007-2017, the SQLAlchemy authors and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.2"
# The full version, including alpha/beta/rc tags.
release = "1.2.0b1"
release_date = None
site_base = os.environ.get("RTD_SITE_BASE", "http://www.sqlalchemy.org")
site_adapter_template = "docs_adapter.mako"
site_adapter_py = "docs_adapter.py"
# arbitrary number recognized by builders.py, incrementing this
# will force a rebuild
build_number = 3
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# have the "gettext" build generate .pot for each individual
# .rst
gettext_compact = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'zzzeeksphinx'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s %s Documentation" % (project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%m/%d/%Y %H:%M:%S'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
html_copy_source = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SQLAlchemydoc'
#autoclass_content = 'both'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('contents', 'sqlalchemy_%s.tex' % release.replace('.', '_'), u'SQLAlchemy Documentation',
     u'Mike Bayer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# sets TOC depth to 3.
latex_preamble = r'\setcounter{tocdepth}{3}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
#latex_elements = {
# 'papersize': 'letterpaper',
# 'pointsize': '10pt',
#}
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sqlalchemy', u'SQLAlchemy Documentation',
[u'SQLAlchemy authors'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'SQLAlchemy'
epub_author = u'SQLAlchemy authors'
epub_publisher = u'SQLAlchemy authors'
epub_copyright = u'2007-2015, SQLAlchemy authors'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
intersphinx_mapping = {
'alembic': ('http://alembic.zzzcomputing.com/en/latest/', None),
'psycopg2': ('http://pythonhosted.org/psycopg2', None),
}
| python | 11,391 |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, List
import torch
from hydra.utils import instantiate
from omegaconf import MISSING, DictConfig, OmegaConf
from omegaconf.errors import ConfigAttributeError
from pytorch_lightning import Trainer
from nemo.collections.asr.data.audio_to_text import FastPitchDataset
from nemo.collections.asr.parts import parsers
from nemo.collections.tts.losses.fastpitchloss import FastPitchLoss
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.modules.fastpitch import FastPitchModule
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import MelSpectrogramType, RegressionValuesType, TokenDurationType, TokenIndex
from nemo.core.neural_types.neural_type import NeuralType
@dataclass
class FastPitchConfig:
parser: Dict[Any, Any] = MISSING
preprocessor: Dict[Any, Any] = MISSING
input_fft: Dict[Any, Any] = MISSING
output_fft: Dict[Any, Any] = MISSING
duration_predictor: Dict[Any, Any] = MISSING
pitch_predictor: Dict[Any, Any] = MISSING
class FastPitchModel(SpectrogramGenerator):
"""FastPitch Model that is used to generate mel spectrograms from text"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
super().__init__(cfg=cfg, trainer=trainer)
self._parser = None
schema = OmegaConf.structured(FastPitchConfig)
# ModelPT ensures that cfg is a DictConfig, but do this second check in case ModelPT changes
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
elif not isinstance(cfg, DictConfig):
raise ValueError(f"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig")
# Ensure passed cfg is compliant with schema
OmegaConf.merge(cfg, schema)
self.preprocessor = instantiate(self._cfg.preprocessor)
input_fft = instantiate(self._cfg.input_fft)
output_fft = instantiate(self._cfg.output_fft)
duration_predictor = instantiate(self._cfg.duration_predictor)
pitch_predictor = instantiate(self._cfg.pitch_predictor)
self.fastpitch = FastPitchModule(
input_fft,
output_fft,
duration_predictor,
pitch_predictor,
cfg.n_speakers,
cfg.symbols_embedding_dim,
cfg.pitch_embedding_kernel_size,
cfg.n_mel_channels,
)
self.loss = FastPitchLoss()
@property
def parser(self):
if self._parser is not None:
return self._parser
if self._validation_dl is not None:
return self._validation_dl.dataset.parser
if self._test_dl is not None:
return self._test_dl.dataset.parser
if self._train_dl is not None:
return self._train_dl.dataset.parser
# Else construct a parser
# Try to get params from validation, test, and then train
params = {}
try:
params = self._cfg.validation_ds.dataset
except ConfigAttributeError:
pass
if params == {}:
try:
params = self._cfg.test_ds.dataset
except ConfigAttributeError:
pass
if params == {}:
try:
params = self._cfg.train_ds.dataset
except ConfigAttributeError:
pass
name = params.get('parser', None) or 'en'
unk_id = params.get('unk_index', None) or -1
blank_id = params.get('blank_index', None) or -1
do_normalize = params.get('normalize', None) or False
self._parser = parsers.make_parser(
labels=self._cfg.labels, name=name, unk_id=unk_id, blank_id=blank_id, do_normalize=do_normalize,
)
return self._parser
def parse(self, str_input: str) -> torch.tensor:
if str_input[-1] not in [".", "!", "?"]:
str_input = str_input + "."
tokens = self.parser(str_input)
x = torch.tensor(tokens).unsqueeze_(0).long().to(self.device)
return x
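    # Hedged usage sketch (the variable names are illustrative, not part of
    # the API): the tensor returned by ``parse`` feeds directly into
    # ``generate_spectrogram``.
    #
    #     tokens = model.parse("Hello world")   # shape (1, T) on model.device
    #     spect = model.generate_spectrogram(tokens=tokens)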
@typecheck(
input_types={
"text": NeuralType(('B', 'T'), TokenIndex()),
"durs": NeuralType(('B', 'T'), TokenDurationType(), optional=True),
"pitch": NeuralType(('B', 'T'), RegressionValuesType(), optional=True),
"speaker": NeuralType(optional=True), # NeuralType(('B'), IntType(), optional=True),
"pace": NeuralType(optional=True),
}
)
def forward(self, *, text, durs=None, pitch=None, speaker=0, pace=1.0):
return self.fastpitch(text=text, durs=durs, pitch=pitch, speaker=speaker, pace=pace)
@typecheck(output_types={"spect": NeuralType(('B', 'T', 'C'), MelSpectrogramType())})
def generate_spectrogram(self, tokens: 'torch.tensor', speaker: int = 0, pace: float = 1.0) -> torch.tensor:
self.eval()
spect, *_ = self(text=tokens, durs=None, pitch=None, speaker=speaker, pace=pace)
return spect
def training_step(self, batch, batch_idx):
audio, audio_lens, text, text_lens, durs, pitch, speakers = batch
mels, mel_lens = self.preprocessor(input_signal=audio, length=audio_lens)
mels_pred, mel_lens, _, _, log_durs_pred, pitch_pred = self(
text=text, durs=durs, pitch=pitch, speaker=speakers, pace=1.0
)
loss, mel_loss, dur_loss, pitch_loss = self.loss(
spect_predicted=mels_pred,
log_durs_predicted=log_durs_pred,
pitch_predicted=pitch_pred,
spect_tgt=mels,
durs_tgt=durs,
dur_lens=text_lens,
pitch_tgt=pitch,
)
losses = {
"mel_loss": mel_loss,
"dur_loss": dur_loss,
"pitch_loss": pitch_loss,
}
all_losses = {"loss": loss, **losses}
return {**all_losses, "progress_bar": losses, "log": all_losses}
def validation_step(self, batch, batch_idx):
audio, audio_lens, text, text_lens, durs, pitch, speakers = batch
mels, mel_lens = self.preprocessor(input_signal=audio, length=audio_lens)
# Calculate val loss on ground truth durations to better align L2 loss in time
mels_pred, mel_lens, _, _, log_durs_pred, pitch_pred = self(
text=text, durs=durs, pitch=None, speaker=speakers, pace=1.0
)
loss, mel_loss, dur_loss, pitch_loss = self.loss(
spect_predicted=mels_pred,
log_durs_predicted=log_durs_pred,
pitch_predicted=pitch_pred,
spect_tgt=mels,
durs_tgt=durs,
dur_lens=text_lens,
pitch_tgt=pitch,
)
ret = {
"loss": loss,
"mel_loss": mel_loss,
"dur_loss": dur_loss,
"pitch_loss": pitch_loss,
}
return {**ret, "progress_bar": ret}
def validation_epoch_end(self, outputs):
collect = lambda key: torch.stack([x[key] for x in outputs]).mean()
tb_logs = {
'val_loss': collect('loss'),
'val_mel_loss': collect('mel_loss'),
'val_dur_loss': collect('dur_loss'),
'val_pitch_loss': collect('pitch_loss'),
}
return {'val_loss': tb_logs['val_loss'], 'log': tb_logs}
def _loader(self, cfg):
parser = parsers.make_parser(
labels=self._cfg.labels,
name='en',
unk_id=-1,
blank_id=-1,
do_normalize=True,
abbreviation_version="fastpitch",
make_table=False,
)
dataset = FastPitchDataset(
manifest_filepath=cfg['manifest_filepath'],
parser=parser,
sample_rate=cfg['sample_rate'],
int_values=cfg.get('int_values', False),
max_duration=cfg.get('max_duration', None),
min_duration=cfg.get('min_duration', None),
max_utts=cfg.get('max_utts', 0),
trim=cfg.get('trim_silence', True),
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg['batch_size'],
collate_fn=dataset.collate_fn,
drop_last=cfg.get('drop_last', True),
shuffle=cfg['shuffle'],
num_workers=cfg.get('num_workers', 16),
)
def setup_training_data(self, cfg):
self._train_dl = self._loader(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self._loader(cfg)
def setup_test_data(self, cfg):
"""Omitted."""
pass
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
# model = PretrainedModelInfo(
# pretrained_model_name="",
# location="",
# description="",
# class_=cls,
# )
# list_of_models.append(model)
return list_of_models
| python | 9,860 |
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
"""
Usage
=====
Put usage instructions here.
Output
======
Put output information here.
Notes:
In order to run this, you'll need to verify that the "mayapy_path" class attribute corresponds to the location on
    your machine. Currently I've just included mapping instructions for Maya StingrayPBS materials, although most of
    the elements needed to support additional Maya materials are already in place.
I've marked areas that still need refinement (or to be added altogether) with TODO comments
TODO- Add command line access (Be sure to use pathlib and box libraries)
TODO- Docstrings need work... wanted to get descriptions in but they need to be set for Sphinx
TODO- Add Blender and 3ds Max interoperability
Links:
https://blender.stackexchange.com/questions/100497/use-blenders-bpy-in-projects-outside-blender
https://knowledge.autodesk.com/support/3ds-max/learn-explore/caas/CloudHelp/cloudhelp/2019/ENU/3DSMax-Batch/files/GUID-0968FF0A-5ADD-454D-B8F6-1983E76A4AF9-htm.html
TODO- Look at dynaconf and wire in a solid means for configuration settings
    TODO- This hasn't been "designed"; it might be worth considering the visual design to ensure the most effective
    and attractive UI
TODO- Allow revisions to Model
TODO- Create several test files from different DCC Applications with different materials
Reading FBX file information (might come in handy later)
    -- Materials information can be extracted from ASCII fbx pretty easily; binary is possible but more difficult
    -- FBX files could be exported as ASCII files and I could use regex there to extract material information
       (a hedged sketch of this idea follows the docstring)
    -- I couldn't get pyfbx_i42 to work, but purportedly it can extract information from binary files. You may just
       have to use the specified python versions
"""
import logging
import subprocess
import json
import sys
import os
import re
from PySide2 import QtWidgets, QtCore, QtGui
from PySide2.QtCore import Slot
from PySide2.QtWidgets import QApplication
from dcc_materials.model import MaterialsModel
from dcc_materials.drag_and_drop import DragAndDrop
import dcc_materials.dcc_material_mapping as mat_map
class MaterialsToLumberyard(QtWidgets.QWidget):
def __init__(self, parent=None):
super(MaterialsToLumberyard, self).__init__(parent)
self.app = QtWidgets.QApplication.instance()
self.setWindowFlags(QtCore.Qt.Window)
self.setGeometry(50, 50, 800, 520)
self.setObjectName('MaterialsToLumberyard')
self.setWindowTitle(' ')
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowMinMaxButtonsHint)
self.isTopLevel()
self.desktop_location = os.path.join(os.path.expanduser('~'), 'Desktop')
self.directory_path = os.path.dirname(os.path.abspath(__file__))
self.lumberyard_materials_directory = os.path.join(self.desktop_location, 'LumberyardMaterials')
self.mayapy_path = os.path.abspath("C:/Program Files/Autodesk/Maya2020/bin/mayapy.exe")
self.blender_path = self.get_blender_path()
self.bold_font_large = QtGui.QFont('Helvetica', 7, QtGui.QFont.Bold)
self.medium_font = QtGui.QFont('Helvetica', 7, QtGui.QFont.Normal)
self.blessed_file_extensions = 'ma mb fbx max blend'.split(' ')
self.dcc_materials_dictionary = {}
self.lumberyard_materials_dictionary = {}
self.lumberyard_material_nodes = []
self.target_file_list = []
self.current_scene = None
self.model = None
self.total_materials = 0
self.main_container = QtWidgets.QVBoxLayout(self)
self.main_container.setContentsMargins(0, 0, 0, 0)
self.main_container.setAlignment(QtCore.Qt.AlignTop)
self.setLayout(self.main_container)
self.content_layout = QtWidgets.QVBoxLayout()
self.content_layout.setAlignment(QtCore.Qt.AlignTop)
self.content_layout.setContentsMargins(10, 3, 10, 5)
self.main_container.addLayout(self.content_layout)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# ---->> Header Bar
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
self.header_bar_layout = QtWidgets.QHBoxLayout()
self.lumberyard_logo_layout = QtWidgets.QHBoxLayout()
self.lumberyard_logo_layout.setAlignment(QtCore.Qt.AlignLeft)
logo_path = os.path.join(self.directory_path, 'img/lumberyard_logo.png')
logo_pixmap = QtGui.QPixmap(logo_path)
self.lumberyard_logo = QtWidgets.QLabel()
self.lumberyard_logo.setPixmap(logo_pixmap)
self.lumberyard_logo_layout.addWidget(self.lumberyard_logo)
self.header_bar_layout.addLayout(self.lumberyard_logo_layout)
self.switch_combobox_layout = QtWidgets.QHBoxLayout()
self.switch_combobox_layout.setAlignment(QtCore.Qt.AlignRight)
self.switch_layout_combobox = QtWidgets.QComboBox()
self.set_combobox_items_accessibility()
self.switch_layout_combobox.setFixedSize(250, 30)
self.combobox_items = ['Add Source Files', 'Source File List', 'DCC Material Values', 'Export Materials']
self.switch_layout_combobox.setStyleSheet('QComboBox {padding-left:6px;}')
self.switch_layout_combobox.addItems(self.combobox_items)
self.switch_combobox_layout.addWidget(self.switch_layout_combobox)
self.header_bar_layout.addLayout(self.switch_combobox_layout)
self.content_layout.addSpacing(5)
self.content_layout.addLayout(self.header_bar_layout)
# ++++++++++++++++++++++++++++++++++++++++++++++++#
# File Source Table / Attributes (Stacked Layout) #
# ++++++++++++++++++++++++++++++++++++++++++++++++#
self.content_stacked_layout = QtWidgets.QStackedLayout()
self.content_layout.addLayout(self.content_stacked_layout)
self.switch_layout_combobox.currentIndexChanged.connect(self.layout_combobox_changed)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# ---->> Add Source Files
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
frame_color_value = '75,75,75'
highlight_color_value = '20,106,30'
self.drag_and_drop_widget = DragAndDrop(frame_color_value, highlight_color_value)
self.drag_and_drop_widget.drop_update.connect(self.drag_and_drop_file_update)
self.drag_and_drop_widget.drop_over.connect(self.drag_and_drop_over)
self.drag_and_drop_layout = QtWidgets.QVBoxLayout()
self.drag_and_drop_layout.setContentsMargins(0, 0, 0, 0)
self.drag_and_drop_layout.setAlignment(QtCore.Qt.AlignCenter)
self.drag_and_drop_widget.setLayout(self.drag_and_drop_layout)
start_message = 'Drag source files here, or use file browser button below to get started.'
self.drag_and_drop_label = QtWidgets.QLabel(start_message)
self.drag_and_drop_label.setStyleSheet('color: white;')
self.drag_and_drop_layout.addWidget(self.drag_and_drop_label)
self.drag_and_drop_layout.addSpacing(10)
self.select_files_button_layout = QtWidgets.QHBoxLayout()
self.select_files_button_layout.setAlignment(QtCore.Qt.AlignCenter)
self.select_files_button = QtWidgets.QPushButton('Select Files')
self.select_files_button_layout.addWidget(self.select_files_button)
self.select_files_button.clicked.connect(self.select_files_button_clicked)
self.select_files_button.setFixedSize(80, 35)
self.drag_and_drop_layout.addLayout(self.select_files_button_layout)
self.content_stacked_layout.addWidget(self.drag_and_drop_widget)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# ---->> Files Table
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
self.target_files_table = QtWidgets.QTableWidget()
self.target_files_table.setFocusPolicy(QtCore.Qt.NoFocus)
self.target_files_table.setColumnCount(2)
self.target_files_table.setAlternatingRowColors(True)
self.target_files_table.setHorizontalHeaderLabels(['File List', ''])
self.target_files_table.horizontalHeader().setStyleSheet('QHeaderView::section {background-color: rgb(220, 220, 220); padding-top:7px; padding-left:5px;}')
self.target_files_table.verticalHeader().hide()
files_header = self.target_files_table.horizontalHeader()
files_header.setFixedHeight(30)
files_header.setDefaultAlignment(QtCore.Qt.AlignLeft)
files_header.setContentsMargins(10, 10, 0, 0)
files_header.setDefaultSectionSize(60)
files_header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
files_header.setSectionResizeMode(1, QtWidgets.QHeaderView.Fixed)
self.target_files_table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.content_stacked_layout.addWidget(self.target_files_table)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# ---->> Scene Information Table
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
self.material_tree_view = QtWidgets.QTreeView()
self.headers = ['Key', 'Value']
self.material_tree_view.setStyleSheet('QTreeView::item {height:25px;} QHeaderView::section {background-color: rgb(220, 220, 220); height:30px; padding-left:10px}')
self.material_tree_view.setFocusPolicy(QtCore.Qt.NoFocus)
self.material_tree_view.setAlternatingRowColors(True)
self.material_tree_view.setUniformRowHeights(True)
self.content_stacked_layout.addWidget(self.material_tree_view)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# ---->> LY Material Definitions
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
self.lumberyard_material_definitions_widget = QtWidgets.QWidget()
self.lumberyard_material_definitions_layout = QtWidgets.QHBoxLayout(self.lumberyard_material_definitions_widget)
self.lumberyard_material_definitions_layout.setSpacing(0)
self.lumberyard_material_definitions_layout.setContentsMargins(0, 0, 0, 0)
self.lumberyard_material_definitions_frame = QtWidgets.QFrame(self.lumberyard_material_definitions_widget)
self.lumberyard_material_definitions_frame.setGeometry(0, 0, 5000, 5000)
self.lumberyard_material_definitions_frame.setStyleSheet('background-color:rgb(75,75,75);')
self.lumberyard_material_definitions_scroller = QtWidgets.QScrollArea()
self.scroller_widget = QtWidgets.QWidget()
self.scroller_layout = QtWidgets.QVBoxLayout()
self.scroller_widget.setLayout(self.scroller_layout)
self.lumberyard_material_definitions_scroller.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.lumberyard_material_definitions_scroller.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.lumberyard_material_definitions_scroller.setWidgetResizable(True)
self.lumberyard_material_definitions_scroller.setWidget(self.scroller_widget)
self.lumberyard_material_definitions_layout.addWidget(self.lumberyard_material_definitions_scroller)
self.content_stacked_layout.addWidget(self.lumberyard_material_definitions_widget)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# ---->> File processing buttons
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
self.process_files_layout = QtWidgets.QHBoxLayout()
self.content_layout.addLayout(self.process_files_layout)
self.process_files_button = QtWidgets.QPushButton('Process Added Files')
self.process_files_button.setFixedHeight(50)
self.process_files_button.clicked.connect(self.process_listed_files_clicked)
self.process_files_layout.addWidget(self.process_files_button)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# ---->> Status bar / Loader
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# TODO- Move all processing of files to another thread and display progress with loader
self.status_bar = QtWidgets.QStatusBar()
self.status_bar.setStyleSheet('background-color: rgb(220, 220, 220);')
self.status_bar.setContentsMargins(0, 0, 0, 0)
self.status_bar.setSizeGripEnabled(False)
self.message_readout_label = QtWidgets.QLabel('Ready.')
self.message_readout_label.setStyleSheet('padding-left: 10px')
self.status_bar.addWidget(self.message_readout_label)
self.progress_bar = QtWidgets.QProgressBar()
self.progress_bar_widget = QtWidgets.QWidget()
self.progress_bar_widget_layout = QtWidgets.QHBoxLayout()
self.progress_bar_widget_layout.setContentsMargins(0, 0, 0, 0)
self.progress_bar_widget_layout.setAlignment(QtCore.Qt.AlignRight)
self.progress_bar_widget.setLayout(self.progress_bar_widget_layout)
self.status_bar.addPermanentWidget(self.progress_bar_widget)
self.progress_bar_widget_layout.addWidget(self.progress_bar)
self.progress_bar.setFixedSize(180, 20)
self.main_container.addWidget(self.status_bar)
############################
# UI Display Layers ########
############################
def populate_source_files_table(self):
"""
        Adds selected files from the 'Source Files' section of the UI. This creates each item listing in the table
        and adds a 'Remove' button that clears the corresponding item from the table. Processed files are
        color coded based on whether or not the materials in the file could be successfully processed. Subsequent
        searches will not currently clear items from the table, as each item acts as a register of materials that
        have and have not yet been processed.
:return:
"""
self.target_files_table.setRowCount(0)
for index, entry in enumerate(self.target_file_list):
            entry = entry[1] if isinstance(entry, list) else entry
self.target_files_table.insertRow(index)
item = QtWidgets.QTableWidgetItem(' {}'.format(entry))
self.target_files_table.setRowHeight(index, 45)
remove_button = QtWidgets.QPushButton('Remove')
remove_button.setFixedWidth(60)
remove_button.clicked.connect(self.remove_source_file_clicked)
self.target_files_table.setItem(index, 0, item)
self.target_files_table.setCellWidget(index, 1, remove_button)
def populate_dcc_material_values_tree(self):
"""
Sets the materials model class to the file attribute tree.
:return:
"""
# TODO- Create mechanism for collapsing previously gathered materials, and or pushing them further down the list
self.material_tree_view.setModel(self.model)
self.material_tree_view.expandAll()
self.material_tree_view.resizeColumnToContents(0)
def populate_export_materials_list(self):
"""
Once all materials have been analyzed inside of DCC applications, the 'Export Materials' view lists all
materials presented as their Lumberyard counterparts. Each listing displays a representation of the material
file based on its corresponding DCC material values and file connections.
:return:
"""
self.reset_export_materials_description()
for count, value in enumerate(self.lumberyard_materials_dictionary):
material_definition_node = MaterialNode([value, self.lumberyard_materials_dictionary[value]], count)
self.lumberyard_material_nodes.append(material_definition_node)
self.scroller_layout.addWidget(material_definition_node)
self.scroller_layout.addLayout(self.create_separator_line())
############################
# TBD ########
############################
def process_file_list(self):
"""
The entry point for reading DCC files and extracting values. Files are filtered and separated
by DCC app (based on file extensions) before processing is done.
Supported DCC applications:
Maya (.ma, .mb, .fbx), 3dsMax(.max), Blender(.blend)
:return:
"""
files_dict = {'maya': [], 'max': [], 'blender': [], 'na': []}
for file_location in self.target_file_list:
file_name = os.path.basename(str(file_location))
file_extension = os.path.splitext(file_name)[1]
target_application = self.get_target_application(file_extension)
if target_application in files_dict.keys():
files_dict[target_application].append(file_location)
for key, values in files_dict.items():
try:
if key == 'maya' and len(values):
self.get_maya_material_values(values)
elif key == 'max' and len(values):
self.get_max_material_values(values)
elif key == 'blender' and len(values):
self.get_blender_material_values(values)
else:
pass
except Exception as e:
# TODO- Allow corrective actions or some display of errors if this fails?
logging.warning('Could not process files. Error: {}'.format(e))
if self.dcc_materials_dictionary:
self.set_transfer_status(self.dcc_materials_dictionary)
# Create Model with extracted values from file list
self.set_material_model()
# Setup Lumberyard Material File Values
self.set_export_materials_description()
# Update UI Layout
self.populate_export_materials_list()
self.switch_layout_combobox.setCurrentIndex(3)
self.set_ui_buttons()
self.message_readout_label.setText('Ready.')
def reset_export_materials_description(self):
pass
def reset_all_values(self):
pass
def create_separator_line(self):
""" Convenience function for adding separation line to the UI. """
layout = QtWidgets.QHBoxLayout()
line = QtWidgets.QLabel()
line.setFrameStyle(QtWidgets.QFrame.HLine | QtWidgets.QFrame.Sunken)
line.setLineWidth(1)
line.setFixedHeight(10)
layout.addWidget(line)
layout.setContentsMargins(8, 0, 8, 0)
return layout
def export_selected_materials(self):
"""
This will eventually be revised to save material definitions in the proper place in the user's project folder,
but for now material definitions will be saved to the desktop.
:return:
"""
if not os.path.exists(self.lumberyard_materials_directory):
os.makedirs(self.lumberyard_materials_directory)
for node in self.lumberyard_material_nodes:
if node.material_name_checkbox.isChecked():
output = os.path.join(self.lumberyard_materials_directory, '{}.material'.format(node.material_name))
with open(output, 'w', encoding='utf-8') as material_file:
json.dump(node.material_info, material_file, ensure_ascii=False, indent=4)
############################
# Getters/Setters ##########
############################
@staticmethod
def get_target_application(file_extension):
"""
Searches compatible file extensions and returns one of three Application names- Maya, 3dsMax, or Blender.
:param file_extension: Passed file extension used to determine DCC Application it originated from.
        :return: Returns the application corresponding to the extension if found, otherwise returns None
"""
app_extensions = {'maya': ['.ma', '.mb', '.fbx'], 'max': ['.max'], 'blender': ['.blend']}
target_dcc_application = [key for key, values in app_extensions.items() if file_extension in values]
if target_dcc_application:
return target_dcc_application[0]
return None
@staticmethod
def get_lumberyard_material_template(shader_type):
"""
Loads material descriptions from the Lumberyard installation, providing a template to compare and convert DCC
shaders to Lumberyard material definitions. This is the first step in the comparison. The second step is to
compare these values with specific mapping instructions for DCC Application and DCC material type to arrive at
a converted material.
:param shader_type: The type of Lumberyard shader to pair material attributes to (i.e. PBR Shader)
:return: File dictionary of the available boilerplate Lumberyard shader settings.
"""
definitions = os.path.join(os.path.dirname(os.path.abspath(__file__)), '{}.material'.format(shader_type))
if os.path.exists(definitions):
with open(definitions) as f:
return json.load(f)
@staticmethod
def get_lumberyard_material_properties(name, dcc_app, material_type, file_connections):
"""
This system will probably need rethinking if DCCs and compatible materials grow. I've tried to keep this
flexible so that it can be expanded with more apps and materials.
:param name: Material name from within the DCC application
:param dcc_app: The application that the material was sourced from
:param material_type: DCC material type
:param file_connections: Texture files found attached to the materials
"""
material_properties = {}
if dcc_app == 'Maya':
material_properties = mat_map.get_maya_material_mapping(name, material_type, file_connections)
elif dcc_app == 'Blender':
material_properties = mat_map.get_blender_material_mapping(name, material_type, file_connections)
elif dcc_app == '3dsMax':
material_properties = mat_map.get_max_material_mapping(name, material_type, file_connections)
else:
pass
return material_properties
@staticmethod
def get_filename_increment(name):
"""
Convenience function that assists in ensuring that if any materials are encountered with the same name, an
underscore and number is appended to it to prevent overwrites.
:param name: The name of the material. The function searches the string for increment numbers, and either adds
one to any encountered, or adds an "_1" if passed name is the first duplicate encountered.
:return: The adjusted name with a unique incremental value.
"""
last_number = re.compile(r'(?:[^\d]*(\d+)[^\d]*)+')
        number_found = last_number.search(name)
        if number_found:
            next_number = str(int(number_found.group(1)) + 1)
            start, end = number_found.span(1)
            name = name[:max(end - len(next_number), start)] + next_number + name[end:]
        else:
            # No digits found; append '_1' so duplicate names never overwrite each other
            name = '{}_1'.format(name)
        return name
def get_maya_material_values(self, target_files):
"""
Launches Maya Standalone and processes list of materials for each scene passed to the 'target_files' argument.
Also sets the environment paths needed for an instance of Maya's Python distribution. After files are processed
        a single dictionary of scene materials is returned and added to the "dcc_materials_dictionary" class attribute.
:param target_files: List of files filtered from total list of files requested for processing that have a
Maya file extension
:return:
"""
# TODO- Set load process to a separate thread and wire load progress bar up
try:
script_path = str(os.path.join(self.directory_path, 'maya_materials.py'))
target_files.append(self.total_materials)
runtime_env = os.environ.copy()
runtime_env['MAYA_LOCATION'] = os.path.dirname(self.mayapy_path)
runtime_env['PYTHONPATH'] = os.path.dirname(self.mayapy_path)
            # Build an argument list rather than a shell string; this is portable and
            # avoids quoting issues with paths that contain spaces.
            command = [self.mayapy_path, script_path] + [str(f) for f in target_files]
            p = subprocess.Popen(command, shell=False, env=runtime_env, stdout=subprocess.PIPE)
output = p.communicate()[0]
self.set_material_dictionary(json.loads(output))
except Exception as e:
logging.warning('maya error: {}'.format(e))
def get_max_material_values(self, target_files):
logging.debug('Max Target file: {}'.format(target_files))
def get_blender_material_values(self, target_files):
logging.debug('Blender Target file: {}'.format(target_files))
        script_path = str(os.path.join(self.directory_path, 'blender_materials.py'))
        target_files.append(self.total_materials)
        # Arguments after '--' are passed through to the Blender script; Popen requires
        # each argument to be its own string, and stdout must be piped to read results.
        command = [self.blender_path, '--background', '--python', script_path, '--']
        command += [str(f) for f in target_files]
        p = subprocess.Popen(command, stdout=subprocess.PIPE)
        output = p.communicate()[0]
        self.set_material_dictionary(json.loads(output))
    def get_blender_path(self):
        """ Finds the most recently created blender.exe under the default install root. """
        blender_base_directory = os.path.join('C:\\', 'Program Files', 'Blender Foundation')
blender_versions_found = []
for (dirpath, dirnames, filenames) in os.walk(blender_base_directory):
for filename in filenames:
if filename == 'blender.exe':
blender_versions_found.append(os.path.join(dirpath, filename))
if blender_versions_found:
return max(blender_versions_found, key=os.path.getctime)
else:
return None
def set_combobox_items_accessibility(self):
"""
Locks items from within the combobox until the sections they connect to have content
:return:
"""
# TODO- Add this functionality
pass
def set_transfer_status(self, transfer_info):
"""
Colorizes listings in the 'Source Files' view of the UI after processing to green or red, indicating whether or
not scene analysis successfully returned compatible materials and their values.
        :param transfer_info: Dictionary of per-material results; each file the scripts
        attempt to process returns a receipt of the success or failure of the analysis.
        :return:
        """
        # TODO- Include some way to get error information if analysis fails, and potentially offer the means to
        # effectively repair values as they map to the intended Lumberyard shader type
        for row in range(self.target_files_table.rowCount()):
            row_item = self.target_files_table.item(row, 0)
            row_path = row_item.text().strip()
            # Green if any processed scene matches this row's file path, red otherwise
            if any(values['SceneName'] == row_path for values in transfer_info.values()):
                row_item.setBackground(QtGui.QColor(192, 255, 171))
            else:
                row_item.setBackground(QtGui.QColor(255, 177, 171))
def set_export_materials_description(self):
root = self.model.rootItem
for row in range(self.model.rowCount()):
source_file = self.model.get_attribute_value('SceneName', root.child(row))
name = self.model.get_attribute_value('MaterialName', root.child(row))
material_type = self.model.get_attribute_value('MaterialType', root.child(row))
dcc_app = self.model.get_attribute_value('DccApplication', root.child(row))
file_connections = {}
shader_attributes = {}
for childIndex in range(root.child(row).childCount()):
child_item = root.child(row).child(childIndex)
child_value = child_item.itemData
if child_item.childCount():
target_dict = file_connections if child_value[0] == 'FileConnections' else shader_attributes
for subChildIndex in range(child_item.childCount()):
sub_child_data = child_item.child(subChildIndex).itemData
target_dict[sub_child_data[0]] = sub_child_data[1]
self.set_material_description(source_file, name, dcc_app, material_type, file_connections)
def set_material_dictionary(self, dcc_dictionary):
"""
        Adds all material descriptions pulled from each DCC file analyzed to the "dcc_materials_dictionary" class attribute.
This function runs each time a subprocess is launched to gather DCC application material values.
        :param dcc_dictionary: Dictionary of values for each material analyzed in the
        processed DCC file list
:return:
"""
logging.debug('DCC Dictionary: {}'.format(json.dumps(dcc_dictionary, indent=4)))
self.total_materials += len(dcc_dictionary)
self.dcc_materials_dictionary.update(dcc_dictionary)
def set_material_model(self, initialize=True):
"""
Once all materials have been gathered across a selected file set query, this organizes the values into a
QT Model Class
:param initialize: Default is set to boolean True. If a model has already been established in the current
session, the initialize parameter would be set to false, and the values added to the Model. All changes to
the model would then be redistributed to other informational views in the UI.
:return:
"""
if initialize:
self.model = MaterialsModel(self.headers, self.dcc_materials_dictionary)
else:
self.model.update()
self.dcc_materials_dictionary.clear()
self.populate_dcc_material_values_tree()
def set_ui_buttons(self):
"""
        Handles UI buttons for each of the stacked layout views (Add Source Files,
        Source File List, DCC Material Values, Export Materials)
:return:
"""
display_index = self.content_stacked_layout.currentIndex()
self.switch_layout_combobox.setEnabled(True)
self.process_files_button.setText('Process Listed Files')
# Add Source Files Layout ------------------------------->>
if display_index == 0:
self.process_files_button.setEnabled(True)
# Source File List -------------------------------------->>
elif display_index == 1:
self.process_files_button.setEnabled(True)
# DCC Material Values Layout ---------------------------->>
elif display_index == 2:
self.process_files_button.setEnabled(False)
# Export Materials Layout ------------------------------->>
else:
self.process_files_button.setText('Export Selected Materials')
if self.lumberyard_materials_dictionary:
self.process_files_button.setEnabled(True)
def set_material_description(self, source_file, name, dcc_app, material_type, file_connections):
"""
Build dictionary for material description based on extracted values
:param source_file: The file that the material was extracted from
:param name: Name of material
:param dcc_app: Source file type of material (Maya, Blender or 3ds Max)
:param material_type: Material type within app (i.e. Stingray PBS)
:param file_connections: Texture files found connected to the shader
:return:
"""
default_settings = self.get_lumberyard_material_template('pbr')
material = {'sourceFile': source_file,
'description': name,
'materialType': default_settings.get('materialType'),
'parentMaterial': default_settings.get('parentMaterial'),
'propertyLayoutVersion': default_settings.get('propertyLayoutVersion'),
'properties': self.get_lumberyard_material_properties(name, dcc_app, material_type, file_connections)}
        material_key = name if name not in self.lumberyard_materials_dictionary else self.get_filename_increment(name)
        self.lumberyard_materials_dictionary[material_key] = material
############################
# Button Actions ###########
############################
def remove_source_file_clicked(self):
"""
In the Source File view of the UI layout, this will remove the listed file in its respective row. If files
have not been processed yet, it prevents that file from being analyzed. If the files have already been
analyzed, this will remove the materials from stored values.
:return:
"""
file_index = self.target_files_table.indexAt(self.sender().pos())
del self.target_file_list[file_index.row()]
        self.populate_source_files_table()
def process_listed_files_clicked(self):
"""
The button serves a dual purpose, depending on the current layout of the window. 'Process listed files'
initiates the DCC file analysis that extracts material information. In the "Export Materials" layout, this
button (for now) will export material files corresponding to each analyzed material.
:return:
"""
# TODO- Need to decide how the materials are going to be routed. At this stage they will just be saved to the
# desktop, but I assume that we want these files to be saved to an associated project folder
        # The button label varies ('Process Added Files' / 'Process Listed Files'), so
        # test against the export label rather than a specific processing label
        if self.sender().text() != 'Export Selected Materials':
self.message_readout_label.setText('Gathering Material Information...')
self.app.processEvents()
self.process_file_list()
else:
self.export_selected_materials()
def select_files_button_clicked(self):
"""
        This dialog allows the user to select DCC files whose materials will be processed for conversion.
:return:
"""
# TODO- Eventually it might be worth it to allow files from multiple locations to be selected. Currently
# this only allows single/multiple files from a single directory to be selected.
dialog = QtWidgets.QFileDialog(self, 'Shift-Select Target Files', self.desktop_location)
dialog.setFileMode(QtWidgets.QFileDialog.ExistingFile)
dialog.setNameFilter('Compatible Files (*.ma *.mb *.fbx *.max *.blend)')
dialog.setOption(QtWidgets.QFileDialog.DontUseNativeDialog, True)
file_view = dialog.findChild(QtWidgets.QListView, 'listView')
# Workaround for selecting multiple files with File Dialog
if file_view:
file_view.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
f_tree_view = dialog.findChild(QtWidgets.QTreeView)
if f_tree_view:
f_tree_view.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
if dialog.exec_() == QtWidgets.QDialog.Accepted:
self.target_file_list += dialog.selectedFiles()
if self.target_file_list:
self.populate_source_files_table()
self.process_files_button.setEnabled(True)
def layout_combobox_changed(self):
"""
Handles main window layout combobox index change.
:return:
"""
self.content_stacked_layout.setCurrentIndex(self.switch_layout_combobox.currentIndex())
self.set_ui_buttons()
def reset_clicked(self):
"""
Brings the application and all variables back to their initial state.
:return:
"""
self.reset_all_values()
############################
# Slots ####################
############################
@Slot(list)
def drag_and_drop_file_update(self, file_list):
for file in file_list:
if os.path.basename(file).split('.')[-1] in self.blessed_file_extensions:
self.target_file_list.append(file)
self.drag_and_drop_widget.urls.clear()
self.populate_source_files_table()
self.message_readout_label.setText('Source files added: {}'.format(len(self.target_file_list)))
self.drag_and_drop_label.setStyleSheet('color: white;')
@Slot(bool)
def drag_and_drop_over(self, is_over):
if is_over:
self.drag_and_drop_label.setStyleSheet('color: rgb(0, 255, 0);')
else:
self.drag_and_drop_label.setStyleSheet('color: white;')
class MaterialNode(QtWidgets.QWidget):
def __init__(self, material_info, current_position, parent=None):
super(MaterialNode, self).__init__(parent)
self.material_name = material_info[0]
self.material_info = material_info[1]
self.current_position = current_position
self.property_settings = {}
self.small_font = QtGui.QFont("Helvetica", 7, QtGui.QFont.Bold)
self.bold_font = QtGui.QFont("Helvetica", 8, QtGui.QFont.Bold)
self.main_layout = QtWidgets.QVBoxLayout()
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.main_layout)
self.background_frame = QtWidgets.QFrame(self)
self.background_frame.setGeometry(0, 0, 5000, 5000)
self.background_frame.setStyleSheet('background-color:rgb(220, 220, 220);')
# ########################
# Title Bar
# ########################
self.title_bar_widget = QtWidgets.QWidget()
self.title_bar_layout = QtWidgets.QHBoxLayout(self.title_bar_widget)
self.title_bar_layout.setContentsMargins(10, 0, 10, 0)
self.title_bar_layout.setAlignment(QtCore.Qt.AlignTop)
self.title_bar_frame = QtWidgets.QFrame(self.title_bar_widget)
self.title_bar_frame.setGeometry(0, 0, 5000, 40)
self.title_bar_frame.setStyleSheet('background-color:rgb(193,154,255);')
self.main_layout.addWidget(self.title_bar_widget)
self.material_name_checkbox = QtWidgets.QCheckBox(self.material_name)
self.material_name_checkbox.setFixedHeight(35)
self.material_name_checkbox.setStyleSheet('spacing:10px; color:white')
self.material_name_checkbox.setFont(self.bold_font)
self.material_name_checkbox.setChecked(True)
self.title_bar_layout.addWidget(self.material_name_checkbox)
self.material_file_layout = QtWidgets.QHBoxLayout()
self.material_file_layout.setAlignment(QtCore.Qt.AlignRight)
self.source_file = QtWidgets.QLabel(os.path.basename(self.material_info['sourceFile']))
self.source_file.setStyleSheet('color:white;')
self.source_file.setFont(self.small_font)
self.material_file_layout.addWidget(self.source_file)
self.material_file_layout.addSpacing(10)
self.edit_button = QtWidgets.QPushButton('Edit')
self.edit_button.clicked.connect(self.edit_button_clicked)
self.edit_button.setFixedWidth(55)
self.material_file_layout.addWidget(self.edit_button)
self.title_bar_layout.addLayout(self.material_file_layout)
self.information_layout = QtWidgets.QHBoxLayout()
self.information_layout.setContentsMargins(10, 0, 10, 10)
self.main_layout.addLayout(self.information_layout)
# ########################
# Details layout
# ########################
self.details_layout = QtWidgets.QVBoxLayout()
self.details_layout.setAlignment(QtCore.Qt.AlignTop)
self.details_groupbox = QtWidgets.QGroupBox("Details")
self.details_groupbox.setFixedWidth(200)
self.details_groupbox.setStyleSheet("QGroupBox {font:bold; border: 1px solid silver; "
"margin-top: 6px;} QGroupBox::title { color: rgb(150, 150, 150); "
"subcontrol-position: top left;}")
self.details_layout.addSpacing(15)
self.material_type_label = QtWidgets.QLabel('Material Type')
self.material_type_label.setStyleSheet('padding-left: 6px; color: white; background-color:rgb(175, 175, 175);')
self.material_type_label.setFixedHeight(25)
self.material_type_label.setFont(self.bold_font)
self.details_layout.addWidget(self.material_type_label)
self.material_type_combobox = QtWidgets.QComboBox()
self.material_type_combobox.setFixedHeight(30)
        self.material_type_combobox.setStyleSheet('QComboBox QAbstractItemView { padding-left: 15px; }')
material_type_items = [' Standard PBR']
self.material_type_combobox.addItems(material_type_items)
self.details_layout.addWidget(self.material_type_combobox)
self.details_layout.addSpacing(10)
self.description_label = QtWidgets.QLabel('Description')
self.description_label.setStyleSheet('padding-left: 6px; color: white; background-color:rgb(175, 175, 175);')
self.description_label.setFixedHeight(25)
self.description_label.setFont(self.bold_font)
self.details_layout.addWidget(self.description_label)
self.description_box = QtWidgets.QTextEdit('This space is reserved for additional information.')
self.details_layout.addWidget(self.description_box)
self.information_layout.addWidget(self.details_groupbox)
self.details_groupbox.setLayout(self.details_layout)
# ########################
# Properties layout
# ########################
self.properties_layout = QtWidgets.QVBoxLayout()
self.properties_layout.setAlignment(QtCore.Qt.AlignTop)
self.properties_groupbox = QtWidgets.QGroupBox("Properties")
self.properties_groupbox.setFixedWidth(150)
self.properties_groupbox.setStyleSheet("QGroupBox {font:bold; border: 1px solid silver; "
"margin-top: 6px;} QGroupBox::title { color: rgb(150, 150, 150); "
"subcontrol-position: top left;}")
self.properties_list_widget = QtWidgets.QListWidget()
self.material_properties = ['ambientOcclusion', 'baseColor', 'emissive', 'metallic', 'roughness', 'specularF0',
'normal', 'opacity']
self.properties_list_widget.addItems(self.material_properties)
self.properties_list_widget.itemSelectionChanged.connect(self.property_selection_changed)
self.properties_layout.addSpacing(15)
self.properties_layout.addWidget(self.properties_list_widget)
self.information_layout.addWidget(self.properties_groupbox)
self.properties_groupbox.setLayout(self.properties_layout)
# ########################
# Attributes layout
# ########################
self.attributes_layout = QtWidgets.QVBoxLayout()
self.attributes_layout.setAlignment(QtCore.Qt.AlignTop)
self.attributes_groupbox = QtWidgets.QGroupBox("Attributes")
self.attributes_groupbox.setStyleSheet("QGroupBox {font:bold; border: 1px solid silver; "
"margin-top: 6px;} QGroupBox::title { color: rgb(150, 150, 150); "
"subcontrol-position: top left;}")
self.information_layout.addWidget(self.attributes_groupbox)
self.attributes_layout.addSpacing(15)
self.attributes_table = QtWidgets.QTableWidget()
self.attributes_table.setFocusPolicy(QtCore.Qt.NoFocus)
self.attributes_table.setColumnCount(2)
self.attributes_table.setAlternatingRowColors(True)
self.attributes_table.setHorizontalHeaderLabels(['Attribute', 'Value'])
self.attributes_table.verticalHeader().hide()
attributes_table_header = self.attributes_table.horizontalHeader()
attributes_table_header.setStyleSheet('QHeaderView::section {background-color: rgb(220, 220, 220);}')
attributes_table_header.setDefaultAlignment(QtCore.Qt.AlignLeft)
attributes_table_header.setContentsMargins(10, 10, 0, 0)
attributes_table_header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
attributes_table_header.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)
attributes_table_header.setSectionResizeMode(0, QtWidgets.QHeaderView.Interactive)
self.attributes_layout.addWidget(self.attributes_table)
self.attributes_groupbox.setLayout(self.attributes_layout)
self.initialize_display_values()
def initialize_display_values(self):
"""
        Initializes all of the widget item information for the material, based on the
        DCC application info passed to the class.
:return:
"""
for material_property in self.material_properties:
if material_property in self.material_info.get('properties'):
self.property_settings[material_property] = self.material_info['properties'].get(material_property)
current_row = self.material_properties.index(material_property)
current_item = self.properties_list_widget.takeItem(current_row)
self.properties_list_widget.insertItem(0, current_item)
else:
self.property_settings[material_property] = 'inactive'
current_row = self.material_properties.index(material_property)
item = self.properties_list_widget.item(current_row)
item.setFlags(item.flags() & ~QtCore.Qt.ItemIsEnabled)
item.setFlags(item.flags() & ~QtCore.Qt.ItemIsSelectable)
self.properties_list_widget.setCurrentRow(0)
self.set_attributes_table(self.get_selected_property())
def set_attributes_table(self, selected_property):
"""
Displays the key, value pairs for the item selected in the Properties list widget
:param selected_property: The item in the Properties list widget that is currently selected. Only active
values are displayed.
:return:
"""
self.attributes_table.setRowCount(0)
row_count = 0
for key, value in self.property_settings[selected_property].items():
self.attributes_table.insertRow(row_count)
key_item = QtWidgets.QTableWidgetItem(key)
self.attributes_table.setItem(row_count, 0, key_item)
            value_item = QtWidgets.QTableWidgetItem(str(value))
self.attributes_table.setItem(row_count, 1, value_item)
row_count += 1
def get_selected_property(self):
"""
Convenience function to get current value selected in the Properties list widget.
:return:
"""
return self.properties_list_widget.currentItem().text()
def update_model(self):
"""
        Not sure if this will go away, but if desired, attribute values could be made
        editable after materials have been scraped from the DCC files
:return:
"""
pass
def edit_button_clicked(self):
"""
This is in place in the event that we want to allow material revisions for properties to be made after
DCC processing step has already been executed. The idea would basically be to surface an editable
table where values can be added, removed or changed within the final material definition.
:return:
"""
logging.debug('Edit button clicked')
def property_selection_changed(self):
"""
        Fired when the selected property in the properties list view has changed.
:return:
"""
self.set_attributes_table(self.get_selected_property())
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
materials_to_lumberyard = MaterialsToLumberyard()
materials_to_lumberyard.show()
sys.exit(app.exec_())
| python | 47,775 |