max_stars_repo_path (string, 6-149 chars) | max_stars_repo_name (string, 629 values) | max_stars_count (int64, 2.02k-191k) | id (string, 3-8 chars) | content (string, 15-158k chars) | violations (string, 51 values) | bugs (string, 11 values) | duplicated_lines_density (string, 46 values) | cognitive_complexity (string, 117 values) | vulnerabilities (string, 3 values) | code_smells (string, 50 values) | sqale_rating (string, 4 values) | security_hotspots (string, 17 values) | complexity (string, 112 values) | issues (list, 0-100 items) | __index_level_0__ (int64, 2-13.3k)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
src/python/grpcio_tests/tests/unit/_cython/_server_test.py | warlock135/grpc | 36,552 | 8047522
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test servers at the level of the Cython API."""
import threading
import time
import unittest
from grpc._cython import cygrpc
class Test(unittest.TestCase):
def test_lonely_server(self):
server_call_completion_queue = cygrpc.CompletionQueue()
server_shutdown_completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server(None, False)
server.register_completion_queue(server_call_completion_queue)
server.register_completion_queue(server_shutdown_completion_queue)
port = server.add_http2_port(b'[::]:0')
server.start()
server_request_call_tag = 'server_request_call_tag'
server_request_call_start_batch_result = server.request_call(
server_call_completion_queue, server_call_completion_queue,
server_request_call_tag)
time.sleep(4)
server_shutdown_tag = 'server_shutdown_tag'
server_shutdown_result = server.shutdown(
server_shutdown_completion_queue, server_shutdown_tag)
server_request_call_event = server_call_completion_queue.poll()
server_shutdown_event = server_shutdown_completion_queue.poll()
if __name__ == '__main__':
unittest.main(verbosity=2)
violations: 5 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 1 | vulnerabilities: 0 | code_smells: 5 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 2 | issues:
[
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 31,
"message": "Remove the unused local variable \"port\".",
"textRange": {
"endLine": 31,
"endOffset": 12,
"startLine": 31,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 35,
"message": "Remove the unused local variable \"server_request_call_start_batch_result\".",
"textRange": {
"endLine": 35,
"endOffset": 46,
"startLine": 35,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 42,
"message": "Remove the unused local variable \"server_shutdown_result\".",
"textRange": {
"endLine": 42,
"endOffset": 30,
"startLine": 42,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 44,
"message": "Remove the unused local variable \"server_request_call_event\".",
"textRange": {
"endLine": 44,
"endOffset": 33,
"startLine": 44,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 45,
"message": "Remove the unused local variable \"server_shutdown_event\".",
"textRange": {
"endLine": 45,
"endOffset": 29,
"startLine": 45,
"startOffset": 8
},
"type": "CODE_SMELL"
}
] | __index_level_0__: 4,926
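The record above pairs a source file (`content`) with SonarQube-style metrics and an `issues` list whose entries carry a `message`, a `type`, severity `impacts`, and a `textRange` locating the finding in the file. The sketch below shows one way such a record could be consumed; it is illustrative only, and the `row` literal is a hypothetical stand-in shaped like the records in this table (field names taken from the header), not a dataset loader.

```python
# Minimal sketch: map each issue onto the flagged source line of one record.
# The `row` dict is a hypothetical stand-in shaped like the rows in this table.
row = {
    "max_stars_repo_path": "example.py",
    "content": "def f(x):\n    unused = 1\n    return x\n",
    "issues": [
        {
            "line": 2,
            "message": 'Remove the unused local variable "unused".',
            "type": "CODE_SMELL",
            "impacts": [{"severity": "LOW", "softwareQuality": "MAINTAINABILITY"}],
            "textRange": {"startLine": 2, "startOffset": 4, "endLine": 2, "endOffset": 10},
        }
    ],
}

lines = row["content"].splitlines()
for issue in row["issues"]:
    flagged = lines[issue["line"] - 1]  # issue line numbers are 1-based
    print(f'{row["max_stars_repo_path"]}:{issue["line"]}: [{issue["type"]}] {issue["message"]}')
    print("    " + flagged.strip())
```

Because the `line` values in `issues` are 1-based, the lookup subtracts one when indexing the split source.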
desktop/core/ext-py/nose-1.3.7/examples/plugin/plug.py | kokosing/hue | 5,079 | 3381786
<filename>desktop/core/ext-py/nose-1.3.7/examples/plugin/plug.py
from nose.plugins import Plugin
class ExamplePlugin(Plugin):
pass
violations: null | bugs: null | duplicated_lines_density: null | cognitive_complexity: null | vulnerabilities: null | code_smells: null | sqale_rating: null | security_hotspots: null | complexity: null
issues: [] | __index_level_0__: 2,918
tests/db_functions/math/test_log.py | Lord-Elrond/django | 61,676 | 140063
<reponame>Lord-Elrond/django
import math
from decimal import Decimal
from django.db.models.functions import Log
from django.test import TestCase
from ..models import DecimalModel, FloatModel, IntegerModel
class LogTests(TestCase):
def test_null(self):
IntegerModel.objects.create(big=100)
obj = IntegerModel.objects.annotate(
null_log_small=Log('small', 'normal'),
null_log_normal=Log('normal', 'big'),
null_log_big=Log('big', 'normal'),
).first()
self.assertIsNone(obj.null_log_small)
self.assertIsNone(obj.null_log_normal)
self.assertIsNone(obj.null_log_big)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('12.9'), n2=Decimal('3.6'))
obj = DecimalModel.objects.annotate(n_log=Log('n1', 'n2')).first()
self.assertIsInstance(obj.n_log, Decimal)
self.assertAlmostEqual(obj.n_log, Decimal(math.log(obj.n2, obj.n1)))
def test_float(self):
FloatModel.objects.create(f1=2.0, f2=4.0)
obj = FloatModel.objects.annotate(f_log=Log('f1', 'f2')).first()
self.assertIsInstance(obj.f_log, float)
self.assertAlmostEqual(obj.f_log, math.log(obj.f2, obj.f1))
def test_integer(self):
IntegerModel.objects.create(small=4, normal=8, big=2)
obj = IntegerModel.objects.annotate(
small_log=Log('small', 'big'),
normal_log=Log('normal', 'big'),
big_log=Log('big', 'big'),
).first()
self.assertIsInstance(obj.small_log, float)
self.assertIsInstance(obj.normal_log, float)
self.assertIsInstance(obj.big_log, float)
self.assertAlmostEqual(obj.small_log, math.log(obj.big, obj.small))
self.assertAlmostEqual(obj.normal_log, math.log(obj.big, obj.normal))
self.assertAlmostEqual(obj.big_log, math.log(obj.big, obj.big))
violations: 0 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 0 | vulnerabilities: 0 | code_smells: 0 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 4
issues: [] | __index_level_0__: 863
sklearn/gaussian_process/tests/_mini_sequence_kernel.py | MaiRajborirug/scikit-learn | 50,961 | 91634
from sklearn.gaussian_process.kernels import Kernel, Hyperparameter
from sklearn.gaussian_process.kernels import GenericKernelMixin
from sklearn.gaussian_process.kernels import StationaryKernelMixin
import numpy as np
from sklearn.base import clone
class MiniSeqKernel(GenericKernelMixin, StationaryKernelMixin, Kernel):
"""
A minimal (but valid) convolutional kernel for sequences of variable
length.
"""
def __init__(self, baseline_similarity=0.5, baseline_similarity_bounds=(1e-5, 1)):
self.baseline_similarity = baseline_similarity
self.baseline_similarity_bounds = baseline_similarity_bounds
@property
def hyperparameter_baseline_similarity(self):
return Hyperparameter(
"baseline_similarity", "numeric", self.baseline_similarity_bounds
)
def _f(self, s1, s2):
return sum(
[1.0 if c1 == c2 else self.baseline_similarity for c1 in s1 for c2 in s2]
)
def _g(self, s1, s2):
return sum([0.0 if c1 == c2 else 1.0 for c1 in s1 for c2 in s2])
def __call__(self, X, Y=None, eval_gradient=False):
if Y is None:
Y = X
if eval_gradient:
return (
np.array([[self._f(x, y) for y in Y] for x in X]),
np.array([[[self._g(x, y)] for y in Y] for x in X]),
)
else:
return np.array([[self._f(x, y) for y in Y] for x in X])
def diag(self, X):
return np.array([self._f(x, x) for x in X])
def clone_with_theta(self, theta):
cloned = clone(self)
cloned.theta = theta
return cloned
violations: 3 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 5 | vulnerabilities: 0 | code_smells: 3 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 11 | issues:
[
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 32,
"message": "Rename this parameter \"X\" to match the regular expression ^[_a-z][a-z0-9_]*$.",
"textRange": {
"endLine": 32,
"endOffset": 24,
"startLine": 32,
"startOffset": 23
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 32,
"message": "Rename this parameter \"Y\" to match the regular expression ^[_a-z][a-z0-9_]*$.",
"textRange": {
"endLine": 32,
"endOffset": 27,
"startLine": 32,
"startOffset": 26
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 44,
"message": "Rename this parameter \"X\" to match the regular expression ^[_a-z][a-z0-9_]*$.",
"textRange": {
"endLine": 44,
"endOffset": 20,
"startLine": 44,
"startOffset": 19
},
"type": "CODE_SMELL"
}
] | __index_level_0__: 585
python/tvm/relay/op/vm/vm.py | XiaoSong9905/tvm | 4,640 | 11348370
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return,invalid-name,len-as-condition,too-many-nested-blocks
"""Dialect operators for Relay VM."""
from . import _ffi_api
def shape_of(expr):
"""Invoke a function to get the shape of a tensor.
Parameters
----------
expr : tvm.relay.Expr
The expr used to evaluate its tensor shape.
Returns
-------
result : tvm.relay.Expr
The expression with the evaluated tensor shape.
"""
return _ffi_api.shape_of(expr)
def invoke_tvm_op(func, inputs, outputs):
"""Call a primitive function with the TVM operator calling convention.
Parameters
----------
func : tvm.relay.Expr
The input expr.
inputs : tvm.relay.Expr
A tuple of the inputs to pass to the TVM function.
outputs : tvm.relay.Expr
A tuple of the outputs to pass to the TVM function.
Returns
-------
result : tvm.relay.Expr
The invoke_tvm_op call node.
"""
return _ffi_api.invoke_tvm_op(func, inputs, outputs)
def shape_func(func, inputs, outputs, is_inputs):
"""Invoke the shape function of the passed function.
Parameters
----------
func : tvm.relay.Expr
The primitive function from which to compute the shape function.
inputs : tvm.relay.Tuple
The tupled inputs.
outputs : tvm.relay.Tuple
The tupled outputs.
is_inputs : List[bool]
A boolean list indicating whether the shape function should expect
shape or input at each position.
Returns
-------
result : tvm.relay.Expr
The shape function expression.
"""
return _ffi_api.shape_func(func, inputs, outputs, is_inputs)
def reshape_tensor(data, shape, newshape):
"""Invoke the VM ReshapeTensor instruction.
Parameters
----------
data : tvm.relay.Expr
The input data.
shape : tvm.relay.Expr
The newshape tensor.
newshape : List[tvm.ir.PrimExpr]
The new shape.
"""
return _ffi_api.reshape_tensor(data, shape, newshape)
violations: 0 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 0 | vulnerabilities: 0 | code_smells: 0 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 4
issues: [] | __index_level_0__: 7,172
tests/sanic/conftest.py | TheVinhLuong102/Strawberry | 2,062 | 11458546
<gh_stars>1000+
import pytest
from .app import create_app
@pytest.fixture
def sanic_client():
yield create_app()
violations: null | bugs: null | duplicated_lines_density: null | cognitive_complexity: null | vulnerabilities: null | code_smells: null | sqale_rating: null | security_hotspots: null | complexity: null
issues: [] | __index_level_0__: 9,616
libs/boxes/roi.py | wesen/FastMaskRCNN | 3,509 | 11339840
<gh_stars>1000+
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
def roi_align(feat, boxes):
"""Given features and boxes, This function crops feature """
return
def roi_cropping(feat, boxes, clses, anchors, spatial_scale=1.0/16):
"""This function computes final rpn boxes
And crops areas from the incoming features
"""
return
violations: 8 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 0 | vulnerabilities: 0 | code_smells: 8 | sqale_rating: 2.0 | security_hotspots: 0 | complexity: 2 | issues:
[
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 10,
"message": "Remove the unused function parameter \"feat\".",
"textRange": {
"endLine": 10,
"endOffset": 18,
"startLine": 10,
"startOffset": 14
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 10,
"message": "Remove the unused function parameter \"boxes\".",
"textRange": {
"endLine": 10,
"endOffset": 25,
"startLine": 10,
"startOffset": 20
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 12,
"message": "Remove this redundant return.",
"textRange": {
"endLine": 12,
"endOffset": 8,
"startLine": 12,
"startOffset": 2
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 14,
"message": "Remove the unused function parameter \"feat\".",
"textRange": {
"endLine": 14,
"endOffset": 21,
"startLine": 14,
"startOffset": 17
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 14,
"message": "Remove the unused function parameter \"clses\".",
"textRange": {
"endLine": 14,
"endOffset": 35,
"startLine": 14,
"startOffset": 30
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 14,
"message": "Remove the unused function parameter \"anchors\".",
"textRange": {
"endLine": 14,
"endOffset": 44,
"startLine": 14,
"startOffset": 37
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 14,
"message": "Remove the unused function parameter \"spatial_scale\".",
"textRange": {
"endLine": 14,
"endOffset": 66,
"startLine": 14,
"startOffset": 46
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 18,
"message": "Remove this redundant return.",
"textRange": {
"endLine": 18,
"endOffset": 8,
"startLine": 18,
"startOffset": 2
},
"type": "CODE_SMELL"
}
] | __index_level_0__: 7,121
desktop/core/ext-py/repoze.who-2.3/repoze/who/plugins/auth_tkt.py | kokosing/hue | 5,079 | 1667072
import datetime
from codecs import utf_8_decode
from codecs import utf_8_encode
import hashlib
import os
import time
from wsgiref.handlers import _monthname # Locale-independent, RFC-2616
from wsgiref.handlers import _weekdayname # Locale-independent, RFC-2616
try:
from urllib.parse import urlencode, parse_qsl
except ImportError:
from urllib import urlencode
from urlparse import parse_qsl
from zope.interface import implementer
from repoze.who.interfaces import IIdentifier
from repoze.who.interfaces import IAuthenticator
from repoze.who._compat import get_cookies
import repoze.who._auth_tkt as auth_tkt
from repoze.who._compat import STRING_TYPES
_UTCNOW = None # unit tests can replace
def _utcnow(): #pragma NO COVERAGE
if _UTCNOW is not None:
return _UTCNOW
return datetime.datetime.utcnow()
@implementer(IIdentifier, IAuthenticator)
class AuthTktCookiePlugin(object):
userid_typename = 'userid_type'
userid_type_decoders = {'int': int,
'unicode': lambda x: utf_8_decode(x)[0],
}
userid_type_encoders = {int: ('int', str),
}
try:
userid_type_encoders[long] = ('int', str)
except NameError: #pragma NO COVER Python >= 3.0
pass
try:
userid_type_encoders[unicode] = ('unicode',
lambda x: utf_8_encode(x)[0])
except NameError: #pragma NO COVER Python >= 3.0
pass
def __init__(self, secret, cookie_name='auth_tkt',
secure=False, include_ip=False,
timeout=None, reissue_time=None, userid_checker=None,
digest_algo=auth_tkt.DEFAULT_DIGEST):
self.secret = secret
self.cookie_name = cookie_name
self.include_ip = include_ip
self.secure = secure
if timeout and ( (not reissue_time) or (reissue_time > timeout) ):
raise ValueError('When timeout is specified, reissue_time must '
'be set to a lower value')
self.timeout = timeout
self.reissue_time = reissue_time
self.userid_checker = userid_checker
self.digest_algo = digest_algo
# IIdentifier
def identify(self, environ):
cookies = get_cookies(environ)
cookie = cookies.get(self.cookie_name)
if cookie is None or not cookie.value:
return None
if self.include_ip:
remote_addr = environ['REMOTE_ADDR']
else:
remote_addr = '0.0.0.0'
try:
timestamp, userid, tokens, user_data = auth_tkt.parse_ticket(
self.secret, cookie.value, remote_addr, self.digest_algo)
except auth_tkt.BadTicket:
return None
if self.timeout and ( (timestamp + self.timeout) < time.time() ):
return None
user_data_dict = dict(parse_qsl(user_data))
userid_type = user_data_dict.get(self.userid_typename)
if userid_type:
decoder = self.userid_type_decoders.get(userid_type)
if decoder:
userid = decoder(userid)
environ['REMOTE_USER_TOKENS'] = tokens
environ['REMOTE_USER_DATA'] = user_data
environ['AUTH_TYPE'] = 'cookie'
identity = {}
identity['timestamp'] = timestamp
identity['repoze.who.plugins.auth_tkt.userid'] = userid
identity['tokens'] = tokens
identity['userdata'] = user_data_dict
return identity
# IIdentifier
def forget(self, environ, identity):
# return a set of expires Set-Cookie headers
return self._get_cookies(environ, 'INVALID', 0)
# IIdentifier
def remember(self, environ, identity):
if self.include_ip:
remote_addr = environ['REMOTE_ADDR']
else:
remote_addr = '0.0.0.0'
cookies = get_cookies(environ)
old_cookie = cookies.get(self.cookie_name)
existing = cookies.get(self.cookie_name)
old_cookie_value = getattr(existing, 'value', None)
max_age = identity.get('max_age', None)
timestamp, userid, tokens, userdata = None, '', (), ''
if old_cookie_value:
try:
timestamp,userid,tokens,userdata = auth_tkt.parse_ticket(
self.secret, old_cookie_value, remote_addr,
self.digest_algo)
except auth_tkt.BadTicket:
pass
tokens = tuple(tokens)
who_userid = identity['repoze.who.userid']
who_tokens = tuple(identity.get('tokens', ()))
who_userdata_dict = identity.get('userdata', {})
encoding_data = self.userid_type_encoders.get(type(who_userid))
if encoding_data:
encoding, encoder = encoding_data
who_userid = encoder(who_userid)
who_userdata_dict[self.userid_typename] = encoding
who_userdata = urlencode(who_userdata_dict)
old_data = (userid, tokens, userdata)
new_data = (who_userid, who_tokens, who_userdata)
if old_data != new_data or (self.reissue_time and
( (timestamp + self.reissue_time) < time.time() )):
ticket = auth_tkt.AuthTicket(
self.secret,
who_userid,
remote_addr,
tokens=who_tokens,
user_data=who_userdata,
cookie_name=self.cookie_name,
secure=self.secure,
digest_algo=self.digest_algo)
new_cookie_value = ticket.cookie_value()
if old_cookie_value != new_cookie_value:
# return a set of Set-Cookie headers
return self._get_cookies(environ, new_cookie_value, max_age)
# IAuthenticator
def authenticate(self, environ, identity):
userid = identity.get('repoze.who.plugins.auth_tkt.userid')
if userid is None:
return None
if self.userid_checker and not self.userid_checker(userid):
return None
identity['repoze.who.userid'] = userid
return userid
def _get_cookies(self, environ, value, max_age=None):
if max_age is not None:
max_age = int(max_age)
later = _utcnow() + datetime.timedelta(seconds=max_age)
# Wdy, DD-Mon-YY HH:MM:SS GMT
expires = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
_weekdayname[later.weekday()],
later.day,
_monthname[later.month],
later.year,
later.hour,
later.minute,
later.second,
)
# the Expires header is *required* at least for IE7 (IE7 does
# not respect Max-Age)
max_age = "; Max-Age=%s; Expires=%s" % (max_age, expires)
else:
max_age = ''
secure = ''
if self.secure:
secure = '; secure; HttpOnly'
cur_domain = environ.get('HTTP_HOST', environ.get('SERVER_NAME'))
cur_domain = cur_domain.split(':')[0] # drop port
wild_domain = '.' + cur_domain
cookies = [
('Set-Cookie', '%s="%s"; Path=/%s%s' % (
self.cookie_name, value, max_age, secure)),
('Set-Cookie', '%s="%s"; Path=/; Domain=%s%s%s' % (
self.cookie_name, value, cur_domain, max_age, secure)),
('Set-Cookie', '%s="%s"; Path=/; Domain=%s%s%s' % (
self.cookie_name, value, wild_domain, max_age, secure))
]
return cookies
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
id(self)) #pragma NO COVERAGE
def _bool(value):
if isinstance(value, STRING_TYPES):
return value.lower() in ('yes', 'true', '1')
return value
def make_plugin(secret=None,
secretfile=None,
cookie_name='auth_tkt',
secure=False,
include_ip=False,
timeout=None,
reissue_time=None,
userid_checker=None,
digest_algo=auth_tkt.DEFAULT_DIGEST,
):
from repoze.who.utils import resolveDotted
if (secret is None and secretfile is None):
raise ValueError("One of 'secret' or 'secretfile' must not be None.")
if (secret is not None and secretfile is not None):
raise ValueError("Specify only one of 'secret' or 'secretfile'.")
if secretfile:
secretfile = os.path.abspath(os.path.expanduser(secretfile))
if not os.path.exists(secretfile):
raise ValueError("No such 'secretfile': %s" % secretfile)
with open(secretfile) as f:
secret = f.read().strip()
if timeout:
timeout = int(timeout)
if reissue_time:
reissue_time = int(reissue_time)
if userid_checker is not None:
userid_checker = resolveDotted(userid_checker)
if isinstance(digest_algo, str):
try:
digest_algo = getattr(hashlib, digest_algo)
except AttributeError:
raise ValueError("No such 'digest_algo': %s" % digest_algo)
plugin = AuthTktCookiePlugin(secret,
cookie_name,
_bool(secure),
_bool(include_ip),
timeout,
reissue_time,
userid_checker,
digest_algo,
)
return plugin
violations: 3 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 48 | vulnerabilities: 0 | code_smells: 3 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 44 | issues:
[
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 106,
"message": "Remove the unused function parameter \"identity\".",
"textRange": {
"endLine": 106,
"endOffset": 38,
"startLine": 106,
"startOffset": 30
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 118,
"message": "Remove the unused local variable \"old_cookie\".",
"textRange": {
"endLine": 118,
"endOffset": 18,
"startLine": 118,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 167,
"message": "Remove the unused function parameter \"environ\".",
"textRange": {
"endLine": 167,
"endOffset": 34,
"startLine": 167,
"startOffset": 27
},
"type": "CODE_SMELL"
}
] | __index_level_0__: 1,454
bokeh/server/django/__init__.py | g-parki/bokeh | 15,193 | 6771486
<filename>bokeh/server/django/__init__.py
# Bokeh imports
from bokeh.util.dependencies import import_required
# Bokeh imports
from .apps import DjangoBokehConfig
from .routing import autoload, directory, document
from .static import static_extensions
import_required("django", "django is required by bokeh.server.django")
import_required("channels", "The package channels is required by bokeh.server.django and must be installed")
default_app_config = "bokeh.server.django.DjangoBokehConfig"
violations: 0 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 0 | vulnerabilities: 0 | code_smells: 0 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 0
issues: [] | __index_level_0__: 9,179
packages/python/plotly/plotly/tests/test_optional/test_px/test_colors.py | mastermind88/plotly.py | 11,750 | 4327666
<gh_stars>1000+
import plotly.express as px
def test_reversed_colorscale():
fig1 = px.scatter(
x=[1, 2], y=[2, 3], color=[3, 4], color_continuous_scale="plasma_r"
)
fig2 = px.scatter(x=[1, 2], y=[2, 3], color=[3, 4], color_continuous_scale="plasma")
colors1 = [val[1] for val in fig1.layout.coloraxis.colorscale]
colors2 = [val[1] for val in fig2.layout.coloraxis.colorscale]
assert colors1 == colors2[::-1]
fig1 = px.scatter(
x=[1, 2],
y=[2, 3],
color=[3, 4],
color_continuous_scale=px.colors.sequential.Plasma,
)
fig2 = px.scatter(
x=[1, 2],
y=[2, 3],
color=[3, 4],
color_continuous_scale=px.colors.sequential.Plasma_r,
)
colors1 = [val[1] for val in fig1.layout.coloraxis.colorscale]
colors2 = [val[1] for val in fig2.layout.coloraxis.colorscale]
assert colors1 == colors2[::-1]
violations: 0 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 0 | vulnerabilities: 0 | code_smells: 0 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 1
issues: [] | __index_level_0__: 13,235
apps/opencv_stitching_tool/opencv_stitching/image_handler.py | nowireless/opencv | 56,632 | 11523688
<reponame>nowireless/opencv
import cv2 as cv
from .megapix_downscaler import MegapixDownscaler
from .stitching_error import StitchingError
class ImageHandler:
DEFAULT_MEDIUM_MEGAPIX = 0.6
DEFAULT_LOW_MEGAPIX = 0.1
DEFAULT_FINAL_MEGAPIX = -1
def __init__(self,
medium_megapix=DEFAULT_MEDIUM_MEGAPIX,
low_megapix=DEFAULT_LOW_MEGAPIX,
final_megapix=DEFAULT_FINAL_MEGAPIX):
if medium_megapix < low_megapix:
raise StitchingError("Medium resolution megapix need to be "
"greater or equal than low resolution "
"megapix")
self.medium_scaler = MegapixDownscaler(medium_megapix)
self.low_scaler = MegapixDownscaler(low_megapix)
self.final_scaler = MegapixDownscaler(final_megapix)
self.scales_set = False
self.img_names = []
self.img_sizes = []
def set_img_names(self, img_names):
self.img_names = img_names
def resize_to_medium_resolution(self):
return self.read_and_resize_imgs(self.medium_scaler)
def resize_to_low_resolution(self, medium_imgs=None):
if medium_imgs and self.scales_set:
return self.resize_medium_to_low(medium_imgs)
return self.read_and_resize_imgs(self.low_scaler)
def resize_to_final_resolution(self):
return self.read_and_resize_imgs(self.final_scaler)
def read_and_resize_imgs(self, scaler):
for img, size in self.input_images():
yield self.resize_img_by_scaler(scaler, size, img)
def resize_medium_to_low(self, medium_imgs):
for img, size in zip(medium_imgs, self.img_sizes):
yield self.resize_img_by_scaler(self.low_scaler, size, img)
@staticmethod
def resize_img_by_scaler(scaler, size, img):
desired_size = scaler.get_scaled_img_size(size)
return cv.resize(img, desired_size,
interpolation=cv.INTER_LINEAR_EXACT)
def input_images(self):
self.img_sizes = []
for name in self.img_names:
img = self.read_image(name)
size = self.get_image_size(img)
self.img_sizes.append(size)
self.set_scaler_scales()
yield img, size
@staticmethod
def get_image_size(img):
"""(width, height)"""
return (img.shape[1], img.shape[0])
@staticmethod
def read_image(img_name):
img = cv.imread(img_name)
if img is None:
raise StitchingError("Cannot read image " + img_name)
return img
def set_scaler_scales(self):
if not self.scales_set:
first_img_size = self.img_sizes[0]
self.medium_scaler.set_scale_by_img_size(first_img_size)
self.low_scaler.set_scale_by_img_size(first_img_size)
self.final_scaler.set_scale_by_img_size(first_img_size)
self.scales_set = True
def get_medium_to_final_ratio(self):
return self.final_scaler.scale / self.medium_scaler.scale
def get_medium_to_low_ratio(self):
return self.low_scaler.scale / self.medium_scaler.scale
def get_final_to_low_ratio(self):
return self.low_scaler.scale / self.final_scaler.scale
violations: 0 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 8 | vulnerabilities: 0 | code_smells: 0 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 23
issues: [] | __index_level_0__: 9,912
poem/cv_mim/pipelines.py | DionysisChristopoulos/google-research | 23,901 | 11274825
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements pipeline utility functions."""
import math
import tensorflow as tf
from poem.core import common
from poem.core import input_generator
from poem.core import keypoint_utils
from poem.core import tfe_input_layer
def create_dataset_from_tables(
input_table_patterns,
batch_sizes,
num_instances_per_record,
shuffle=False,
num_epochs=None,
drop_remainder=False,
keypoint_names_3d=None,
keypoint_names_2d=None,
feature_dim=None,
num_classes=None,
num_frames=None,
shuffle_buffer_size=4096,
num_shards=1,
shard_index=None,
common_module=common,
dataset_class=tf.data.TFRecordDataset,
input_example_parser_creator=tfe_input_layer.create_tfe_parser,
seed=None):
"""Reads data from tf.Example table.
Note that this function mainly duplicates `read_batch_from_tfe_tables` in
`v1.pipeline_utils.py` for compatible with tf2.
IMPORTANT: We assume that 2D keypoints from the input have been normalized by
image size. No normalization is expected and no denormalization will be
performed for both 2D and 3D keypoints.
Args:
input_table_patterns: A list of strings for the paths or pattern to input
tables.
batch_sizes: A list of integers for the batch sizes to read from each table.
num_instances_per_record: An integer for the number of instances per
tf.Example record.
shuffle: A boolean for whether to shuffle batch.
num_epochs: An integer for the number of epochs to read. Use `None` to read
indefinitely.
drop_remainder: A boolean for whether to drop remainder batch.
keypoint_names_3d: A list of strings for 3D keypoint names to read
(coordinates). Use None to skip reading 2D keypoints.
keypoint_names_2d: A list of strings for 2D keypoint names to read
(coordinates and scores). Use None to skip reading 2D keypoints.
feature_dim: An integer for size of pre-computed feature vectors. Use None
to skip reading feature vectors.
num_classes: An integer for total number of classification label classes to
read labels for. Use None to skip reading class labels.
num_frames: An integer for the number of frames per object each example has.
Use None to skip adding the frame dimension.
shuffle_buffer_size: An integer for the buffer size used for shuffling. A
large buffer size benefits shuffling quality.
num_shards: An integer for the number of shards to divide the dataset. This
is useful to distributed training. See `tf.data.Dataset.shard` for
details.
shard_index: An integer for the shard index to use. This is useful to
distributed training, and should usually be set to the id of a
synchronized worker. See `tf.data.Dataset.shard` for details. Note this
must be specified if `num_shards` is greater than 1.
common_module: A Python module that defines common constants.
dataset_class: A dataset class to use. Must match input table type.
input_example_parser_creator: A function handle for creating parser
function.
seed: An integer for random seed.
Returns:
A tf.data.Dataset object.
"""
parser_kwargs = {
'num_objects': num_instances_per_record,
}
if keypoint_names_3d:
parser_kwargs.update({
'keypoint_names_3d': keypoint_names_3d,
'include_keypoint_scores_3d': False,
})
if keypoint_names_2d:
parser_kwargs.update({
'keypoint_names_2d': keypoint_names_2d,
'include_keypoint_scores_2d': True,
})
if feature_dim:
parser_kwargs.update({
'feature_dim': feature_dim,
})
if num_classes:
parser_kwargs.update({
'num_classes': num_classes,
})
if num_frames:
parser_kwargs.update({
'sequence_length': num_frames,
})
parser_fn = input_example_parser_creator(
common_module=common_module, **parser_kwargs)
dataset = tfe_input_layer.read_batch_from_tables(
input_table_patterns,
batch_sizes=batch_sizes,
drop_remainder=drop_remainder,
num_epochs=num_epochs,
num_shards=num_shards,
shard_index=shard_index,
shuffle=shuffle,
shuffle_buffer_size=shuffle_buffer_size,
seed=seed,
dataset_class=dataset_class,
parser_fn=parser_fn)
return dataset
def create_model_input(inputs,
model_input_keypoint_type,
keypoint_profile_2d=None,
keypoint_profile_3d=None,
normalize_keypoints_2d=True,
min_keypoint_score_2d=-1.0,
azimuth_range=(-math.pi, math.pi),
elevation_range=(-math.pi / 6.0, math.pi / 6.0),
roll_range=(-math.pi / 6.0, math.pi / 6.0),
seed=None):
"""Creates model input features (2D keypoints) from input keypoints.
Note that this function mainly duplicates `create_model_input` in
`v1.input_generator.py` for compatible with tf2.
IMPORTANT: We assume that 2D keypoints from the inputs have been normalized by
image size. This function will reads image sizes from the input and
denormalize the 2D keypoints with them. No normalization is expected and no
denormalization will be performed for 3D keypoints.
Args:
inputs: A dictionary for tensor inputs.
model_input_keypoint_type: An enum string for model input keypoint type. See
`MODEL_INPUT_KEYPOINT_TYPE_*` for supported values.
keypoint_profile_2d: A KeypointProfile2D object for input 2D keypoints.
Required for normalizing 2D keypoints. Also required when 3D-to-2D
projection is involved.
keypoint_profile_3d: A KeypointProfile3D object for input 3D keypoints. Only
used when 3D-to-2D projection is involved.
normalize_keypoints_2d: A boolean for whether to normalize 2D keypoints at
the end.
min_keypoint_score_2d: A float for the minimum score to consider a 2D
keypoint as invalid.
azimuth_range: A tuple for minimum and maximum azimuth angles to randomly
rotate 3D keypoints with.
elevation_range: A tuple for minimum and maximum elevation angles to
randomly rotate 3D keypoints with.
roll_range: A tuple for minimum and maximum roll angles to randomly rotate
3D keypoints with.
seed: An integer for random seed.
Returns:
features: A tensor for input features. Shape = [..., feature_dim].
side_outputs: A dictionary for side outputs, which includes
`offset_points_2d` (shape = [..., 1, 2]) and `scale_distances_2d` (shape =
[..., 1, 1]) if `normalize_keypoints_2d` is True.
"""
keypoints_2d = keypoint_utils.denormalize_points_by_image_size(
inputs[common.KEY_KEYPOINTS_2D],
image_sizes=inputs[common.KEY_IMAGE_SIZES])
keypoint_scores_2d = inputs[common.KEY_KEYPOINT_SCORES_2D]
if min_keypoint_score_2d < 0.0:
keypoint_masks_2d = tf.ones_like(keypoint_scores_2d, dtype=tf.float32)
else:
keypoint_masks_2d = tf.cast(
tf.math.greater_equal(keypoint_scores_2d, min_keypoint_score_2d),
dtype=tf.float32)
keypoints_3d = inputs.get(common.KEY_KEYPOINTS_3D, None)
features, side_outputs = input_generator.create_model_input(
keypoints_2d,
keypoint_masks_2d,
keypoints_3d,
model_input_keypoint_type,
normalize_keypoints_2d=normalize_keypoints_2d,
keypoint_profile_2d=keypoint_profile_2d,
keypoint_profile_3d=keypoint_profile_3d,
azimuth_range=azimuth_range,
elevation_range=elevation_range,
roll_range=roll_range,
seed=seed)
# IMPORTANT: It is better not to modify `inputs` in TF2. Instead, we save
# results in the `side_outputs` for further computation.
side_outputs.update({
common.KEY_KEYPOINTS_2D: keypoints_2d,
common.KEY_KEYPOINT_MASKS_2D: keypoint_masks_2d
})
return features, side_outputs
violations: 1 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 7 | vulnerabilities: 0 | code_smells: 1 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 8 | issues:
[
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 29,
"message": "Function \"create_dataset_from_tables\" has 18 parameters, which is greater than the 13 authorized.",
"textRange": {
"endLine": 46,
"endOffset": 13,
"startLine": 29,
"startOffset": 4
},
"type": "CODE_SMELL"
}
] | __index_level_0__: 6,733
torch_geometric/nn/acts.py | NucciTheBoss/pytorch_geometric | 12,651 | 178744
<reponame>NucciTheBoss/pytorch_geometric<gh_stars>1000+
def swish(x):
return x * x.sigmoid()
violations: 0 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 0 | vulnerabilities: 0 | code_smells: 0 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 1
issues: [] | __index_level_0__: 1,099
recipes/Python/578535_lndirpy_short_pythversiBSDX11_lndir/recipe-578535.py | tdiprima/code | 2,023 | 4071122
#!/usr/bin/env python
################################################################################
# #
# Copyright (c) 2013, Mike 'Fuzzy' Partin <<EMAIL>> #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# #
# 1. Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# 2. Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation #
# and/or other materials provided with the distribution. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #
# The views and conclusions contained in the software and documentation are #
# those of the authors and should not be interpreted as representing official #
# policies, either expressed or implied, of the FreeBSD Project. #
# #
################################################################################
################################################################################
### Module imports ###
################################################################################
# Stdlib
import os
import re
import sys
import types
################################################################################
### Main logic and argument handling ###
################################################################################
try:
if __name__ == '__main__':
### check to see that all args are present on the command line
##############################################################
if len(sys.argv) < 3:
print("Usage: %s <src>/ <dst>/" % sys.argv[0])
sys.exit(1)
else:
### check to see that source and destination targets exist
##########################################################
for i in [str(sys.argv[1]), str(sys.argv[2])]:
if not os.path.isdir(i):
raise OSError("ERROR: %s is not a valid directory." % i)
### Setup some convenience
src = str(sys.argv[1])
dst = str(sys.argv[2])
src_b = None
if len(sys.argv) == 4:
src_b = sys.argv[3]
if src_b == None:
if src[-1:] == '/':
src_b = os.path.basename(src[:-1])
else:
src_b = os.path.basename(src)
### start walking the source target
###################################
dirs_c = 0 # counter for dires
file_c = 0 # counter for files
for root, dirs, files in os.walk(src):
for i in files:
os.symlink('%s/%s' % (root, i),
'%s%s/%s' % (dst, re.sub(src, '', root), i))
file_c += 1
for i in dirs:
try:
os.mkdir('%s%s/%s' % (dst, re.sub(src, '', root), i))
except OSError:
pass
dirs_c += 1
sys.stdout.write('[1;32m>[0m %-53s %6d dirs %6d files\r' % (
src_b[:52], # basename of src
dirs_c, # Dir count
file_c)) # File count
sys.stdout.flush()
sys.stdout.write('[1;32m>[0m %-53s %6d dirs %6d files\n' % (
src_b[:52], # basename of src
dirs_c, # Dir count
file_c)) # File count
sys.stdout.flush()
except OSError as msg:
print(msg)
sys.exit(0)
violations: 1 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 39 | vulnerabilities: 0 | code_smells: 1 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 10 | issues:
[
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 74,
"message": "Use `endswith` here.",
"textRange": {
"endLine": 74,
"endOffset": 26,
"startLine": 74,
"startOffset": 11
},
"type": "CODE_SMELL"
}
] | __index_level_0__: 12,834
tests/components/image_processing/test_init.py | MrDelik/core | 30,023 | 6472508
<reponame>MrDelik/core<gh_stars>1000+
"""The tests for the image_processing component."""
from unittest.mock import PropertyMock, patch
import pytest
import homeassistant.components.http as http
import homeassistant.components.image_processing as ip
from homeassistant.const import ATTR_ENTITY_PICTURE
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component, async_capture_events
from tests.components.image_processing import common
@pytest.fixture
def aiohttp_unused_port(loop, aiohttp_unused_port, socket_enabled):
"""Return aiohttp_unused_port and allow opening sockets."""
return aiohttp_unused_port
def get_url(hass):
"""Return camera url."""
state = hass.states.get("camera.demo_camera")
return f"{hass.config.internal_url}{state.attributes.get(ATTR_ENTITY_PICTURE)}"
async def setup_image_processing(hass, aiohttp_unused_port):
"""Set up things to be run when tests are started."""
await async_setup_component(
hass,
http.DOMAIN,
{http.DOMAIN: {http.CONF_SERVER_PORT: aiohttp_unused_port()}},
)
config = {ip.DOMAIN: {"platform": "test"}, "camera": {"platform": "demo"}}
await async_setup_component(hass, ip.DOMAIN, config)
await hass.async_block_till_done()
async def setup_image_processing_alpr(hass):
"""Set up things to be run when tests are started."""
config = {ip.DOMAIN: {"platform": "demo"}, "camera": {"platform": "demo"}}
await async_setup_component(hass, ip.DOMAIN, config)
await hass.async_block_till_done()
return async_capture_events(hass, "image_processing.found_plate")
async def setup_image_processing_face(hass):
"""Set up things to be run when tests are started."""
config = {ip.DOMAIN: {"platform": "demo"}, "camera": {"platform": "demo"}}
await async_setup_component(hass, ip.DOMAIN, config)
await hass.async_block_till_done()
return async_capture_events(hass, "image_processing.detect_face")
async def test_setup_component(hass):
"""Set up demo platform on image_process component."""
config = {ip.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, ip.DOMAIN):
assert await async_setup_component(hass, ip.DOMAIN, config)
async def test_setup_component_with_service(hass):
"""Set up demo platform on image_process component test service."""
config = {ip.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, ip.DOMAIN):
assert await async_setup_component(hass, ip.DOMAIN, config)
assert hass.services.has_service(ip.DOMAIN, "scan")
@patch(
"homeassistant.components.demo.camera.Path.read_bytes",
return_value=b"Test",
)
async def test_get_image_from_camera(
mock_camera_read, hass, aiohttp_unused_port, enable_custom_integrations
):
"""Grab an image from camera entity."""
await setup_image_processing(hass, aiohttp_unused_port)
common.async_scan(hass, entity_id="image_processing.test")
await hass.async_block_till_done()
state = hass.states.get("image_processing.test")
assert mock_camera_read.called
assert state.state == "1"
assert state.attributes["image"] == b"Test"
@patch(
"homeassistant.components.camera.async_get_image",
side_effect=HomeAssistantError(),
)
async def test_get_image_without_exists_camera(
mock_image, hass, aiohttp_unused_port, enable_custom_integrations
):
"""Try to get image without exists camera."""
await setup_image_processing(hass, aiohttp_unused_port)
hass.states.async_remove("camera.demo_camera")
common.async_scan(hass, entity_id="image_processing.test")
await hass.async_block_till_done()
state = hass.states.get("image_processing.test")
assert mock_image.called
assert state.state == "0"
async def test_alpr_event_single_call(hass, aioclient_mock):
"""Set up and scan a picture and test plates from event."""
alpr_events = await setup_image_processing_alpr(hass)
aioclient_mock.get(get_url(hass), content=b"image")
common.async_scan(hass, entity_id="image_processing.demo_alpr")
await hass.async_block_till_done()
state = hass.states.get("image_processing.demo_alpr")
assert len(alpr_events) == 4
assert state.state == "AC3829"
event_data = [
event.data for event in alpr_events if event.data.get("plate") == "AC3829"
]
assert len(event_data) == 1
assert event_data[0]["plate"] == "AC3829"
assert event_data[0]["confidence"] == 98.3
assert event_data[0]["entity_id"] == "image_processing.demo_alpr"
async def test_alpr_event_double_call(hass, aioclient_mock):
"""Set up and scan a picture and test plates from event."""
alpr_events = await setup_image_processing_alpr(hass)
aioclient_mock.get(get_url(hass), content=b"image")
common.async_scan(hass, entity_id="image_processing.demo_alpr")
common.async_scan(hass, entity_id="image_processing.demo_alpr")
await hass.async_block_till_done()
state = hass.states.get("image_processing.demo_alpr")
assert len(alpr_events) == 4
assert state.state == "AC3829"
event_data = [
event.data for event in alpr_events if event.data.get("plate") == "AC3829"
]
assert len(event_data) == 1
assert event_data[0]["plate"] == "AC3829"
assert event_data[0]["confidence"] == 98.3
assert event_data[0]["entity_id"] == "image_processing.demo_alpr"
@patch(
"homeassistant.components.demo.image_processing.DemoImageProcessingAlpr.confidence",
new_callable=PropertyMock(return_value=95),
)
async def test_alpr_event_single_call_confidence(confidence_mock, hass, aioclient_mock):
"""Set up and scan a picture and test plates from event."""
alpr_events = await setup_image_processing_alpr(hass)
aioclient_mock.get(get_url(hass), content=b"image")
common.async_scan(hass, entity_id="image_processing.demo_alpr")
await hass.async_block_till_done()
state = hass.states.get("image_processing.demo_alpr")
assert len(alpr_events) == 2
assert state.state == "AC3829"
event_data = [
event.data for event in alpr_events if event.data.get("plate") == "AC3829"
]
assert len(event_data) == 1
assert event_data[0]["plate"] == "AC3829"
assert event_data[0]["confidence"] == 98.3
assert event_data[0]["entity_id"] == "image_processing.demo_alpr"
async def test_face_event_call(hass, aioclient_mock):
"""Set up and scan a picture and test faces from event."""
face_events = await setup_image_processing_face(hass)
aioclient_mock.get(get_url(hass), content=b"image")
common.async_scan(hass, entity_id="image_processing.demo_face")
await hass.async_block_till_done()
state = hass.states.get("image_processing.demo_face")
assert len(face_events) == 2
assert state.state == "Hans"
assert state.attributes["total_faces"] == 4
event_data = [
event.data for event in face_events if event.data.get("name") == "Hans"
]
assert len(event_data) == 1
assert event_data[0]["name"] == "Hans"
assert event_data[0]["confidence"] == 98.34
assert event_data[0]["gender"] == "male"
assert event_data[0]["entity_id"] == "image_processing.demo_face"
@patch(
"homeassistant.components.demo.image_processing."
"DemoImageProcessingFace.confidence",
new_callable=PropertyMock(return_value=None),
)
async def test_face_event_call_no_confidence(mock_config, hass, aioclient_mock):
"""Set up and scan a picture and test faces from event."""
face_events = await setup_image_processing_face(hass)
aioclient_mock.get(get_url(hass), content=b"image")
common.async_scan(hass, entity_id="image_processing.demo_face")
await hass.async_block_till_done()
state = hass.states.get("image_processing.demo_face")
assert len(face_events) == 3
assert state.state == "4"
assert state.attributes["total_faces"] == 4
event_data = [
event.data for event in face_events if event.data.get("name") == "Hans"
]
assert len(event_data) == 1
assert event_data[0]["name"] == "Hans"
assert event_data[0]["confidence"] == 98.34
assert event_data[0]["gender"] == "male"
assert event_data[0]["entity_id"] == "image_processing.demo_face"
violations: 8 | bugs: 5 | duplicated_lines_density: 0.0 | cognitive_complexity: 0 | vulnerabilities: 0 | code_smells: 3 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 19 | issues:
[
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 90,
"message": "Define a constant instead of duplicating this literal \"image_processing.test\" 4 times.",
"textRange": {
"endLine": 90,
"endOffset": 61,
"startLine": 90,
"startOffset": 38
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 126,
"message": "Define a constant instead of duplicating this literal \"image_processing.demo_alpr\" 10 times.",
"textRange": {
"endLine": 126,
"endOffset": 66,
"startLine": 126,
"startOffset": 38
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 139,
"message": "Do not perform equality checks with floating point values.",
"textRange": {
"endLine": 139,
"endOffset": 46,
"startLine": 139,
"startOffset": 11
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 162,
"message": "Do not perform equality checks with floating point values.",
"textRange": {
"endLine": 162,
"endOffset": 46,
"startLine": 162,
"startOffset": 11
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 188,
"message": "Do not perform equality checks with floating point values.",
"textRange": {
"endLine": 188,
"endOffset": 46,
"startLine": 188,
"startOffset": 11
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 197,
"message": "Define a constant instead of duplicating this literal \"image_processing.demo_face\" 6 times.",
"textRange": {
"endLine": 197,
"endOffset": 66,
"startLine": 197,
"startOffset": 38
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 211,
"message": "Do not perform equality checks with floating point values.",
"textRange": {
"endLine": 211,
"endOffset": 47,
"startLine": 211,
"startOffset": 11
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 240,
"message": "Do not perform equality checks with floating point values.",
"textRange": {
"endLine": 240,
"endOffset": 47,
"startLine": 240,
"startOffset": 11
},
"type": "BUG"
}
] | __index_level_0__: 4,150
pipenv/patched/piptools/repositories/__init__.py | Enzime/pipenv | 18,636 | 9671823
<gh_stars>1000+
# flake8: noqa
from .local import LocalRequirementsRepository
from .pypi import PyPIRepository
violations: 0 | bugs: 0 | duplicated_lines_density: 0.0 | cognitive_complexity: 0 | vulnerabilities: 0 | code_smells: 0 | sqale_rating: 1.0 | security_hotspots: 0 | complexity: 0
issues: [] | __index_level_0__: 5,906
torch/autograd/profiler.py | xiaohanhuang/pytorch | 60,067 | 11457937
<gh_stars>1000+
from torch.autograd.profiler_util import (
EventList, FunctionEvent, MemRecordsAcc, MEMORY_EVENT_NAME,
_filter_name, _filter_stack_entry, _rewrite_name
)
from torch.autograd import (
DeviceType, ProfilerActivity, ProfilerConfig, ProfilerState,
kineto_available, _ProfilerResult, _disable_profiler, _enable_profiler,
_prepare_profiler, _supported_activities
)
import torch
import torch.cuda
from torch.futures import Future
from typing import Any, Dict, List, Optional
from warnings import warn
try:
# Available in Python >= 3.2
from contextlib import ContextDecorator
except ImportError:
import functools
class ContextDecorator(object): # type: ignore[no-redef]
def __enter__(self):
raise NotImplementedError
def __exit__(self, exc_type, exc_val, exc_tb):
raise NotImplementedError
def __call__(self, func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapped
class profile(object):
"""Context manager that manages autograd profiler state and holds a summary of results.
Under the hood it just records events of functions being executed in C++ and
exposes those events to Python. You can wrap any code into it and it will
only report runtime of PyTorch functions.
Note: profiler is thread local and is automatically propagated into the async tasks
Args:
enabled (bool, optional): Setting this to False makes this context manager a no-op.
use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API.
Adds approximately 4us of overhead to each tensor operation.
record_shapes (bool, optional): If shapes recording is set, information
about input dimensions will be collected. This allows one to see which
dimensions have been used under the hood and further group by them
using prof.key_averages(group_by_input_shape=True). Please note that
shape recording might skew your profiling data. It is recommended to
use separate runs with and without shape recording to validate the timing.
Most likely the skew will be negligible for bottom most events (in a case
of nested function calls). But for higher level functions the total
self cpu time might be artificially increased because of the shape
collection.
with_flops (bool, optional): If with_flops is set, the profiler will estimate
the FLOPs (floating point operations) value using the operator's input shape.
This allows one to estimate the hardware performance. Currently,
this option only works for the matrix multiplication and 2D convolution operators.
profile_memory (bool, optional): track tensor memory allocation/deallocation.
with_stack (bool, optional): record source information (file and line number) for the ops.
with_modules (bool): record module hierarchy (including function names)
corresponding to the callstack of the op. e.g. If module A's forward call's
module B's forward which contains an aten::add op,
then aten::add's module hierarchy is A.B
Note that this support exist, at the moment, only for TorchScript models
and not eager mode models.
use_kineto (bool, optional): experimental, enable profiling with Kineto profiler.
use_cpu (bool, optional): profile CPU events; setting to ``False`` requires
``use_kineto=True`` and can be used to lower the overhead for GPU-only profiling.
.. warning:
Enabling memory profiling or source attribution incurs additional profiler
overhead
.. warning:
This context managers should not be called recursively, i.e. no nested
instances are allowed
.. warning:
Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_),
one cannot use the profiler with ``use_cuda = True`` to benchmark
DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading,
please use ``use_cuda = False`` or ``num_workers = 0``.
Example:
>>> x = torch.randn((1, 1), requires_grad=True)
>>> with torch.autograd.profiler.profile() as prof:
>>> for _ in range(100): # any normal python code, really!
>>> y = x ** 2
>> y.backward()
>>> # NOTE: some columns were removed for brevity
>>> print(prof.key_averages().table(sort_by="self_cpu_time_total"))
----------------------------------- --------------- --------------- ---------------
Name Self CPU total CPU time avg Number of Calls
----------------------------------- --------------- --------------- ---------------
mul 32.048ms 32.048ms 200
pow 27.041ms 27.041ms 200
PowBackward0 9.727ms 55.483ms 100
torch::autograd::AccumulateGrad 9.148ms 9.148ms 100
torch::autograd::GraphRoot 691.816us 691.816us 100
----------------------------------- --------------- --------------- ---------------
"""
def __init__(
self,
enabled=True,
*,
use_cuda=False,
record_shapes=False,
with_flops=False,
profile_memory=False,
with_stack=False,
with_modules=False,
use_kineto=False,
use_cpu=True):
self.enabled: bool = enabled
if not self.enabled:
return
self.use_cuda = use_cuda
self.function_events: Optional[EventList] = None
self.entered = False
self.record_shapes = record_shapes
self.with_flops = with_flops
self.record_shapes |= self.with_flops
self.profile_memory = profile_memory
self.with_stack = with_stack
self.with_modules = with_modules
self.use_cpu = use_cpu
self.kineto_results: Optional[_ProfilerResult] = None
if not self.use_cpu:
assert use_kineto, \
"Device-only events supported only with Kineto (use_kineto=True)"
if self.use_cuda and not torch.cuda.is_available():
warn("CUDA is not available, disabling CUDA profiling")
self.use_cuda = False
self.kineto_activities = set()
if self.use_cpu:
self.kineto_activities.add(ProfilerActivity.CPU)
self.profiler_kind = ProfilerState.KINETO
if self.use_cuda:
if (not use_kineto or ProfilerActivity.CUDA not in
_supported_activities()):
assert self.use_cpu, "Legacy CUDA profiling requires use_cpu=True"
self.profiler_kind = ProfilerState.KINETO_GPU_FALLBACK
else:
self.kineto_activities.add(ProfilerActivity.CUDA)
assert len(self.kineto_activities) > 0, \
"No activities specified for the profiler"
def config(self):
return ProfilerConfig(
self.profiler_kind,
self.record_shapes,
self.profile_memory,
self.with_stack,
self.with_flops,
self.with_modules)
def __enter__(self):
if not self.enabled:
return
if self.entered:
raise RuntimeError("Profiler context manager is not reentrant")
self._prepare_trace()
self._start_trace()
return self
def _prepare_trace(self):
self.entered = True
_prepare_profiler(self.config(), self.kineto_activities)
def _start_trace(self):
self.entered = True
_enable_profiler(self.config(), self.kineto_activities)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enabled:
return
if self.use_cuda:
torch.cuda.synchronize()
self.kineto_results = _disable_profiler()
parsed_results = self._parse_kineto_results(self.kineto_results)
self.function_events = EventList(
parsed_results,
use_cuda=self.use_cuda,
profile_memory=self.profile_memory,
with_flops=self.with_flops)
self.function_events._build_tree()
return False
def __repr__(self):
if self.function_events is None:
return '<unfinished torch.autograd.profile>'
return repr(self.function_events)
def __str__(self):
if self.function_events is None:
return '<unfinished torch.autograd.profile>'
return str(self.function_events)
def _check_finish(self):
if self.function_events is None:
raise RuntimeError("Profiler didn't finish running")
def table(self, sort_by=None, row_limit=100, max_src_column_width=75, header=None, top_level_events_only=False):
self._check_finish()
assert self.function_events is not None
return self.function_events.table(
sort_by=sort_by, row_limit=row_limit, max_src_column_width=max_src_column_width, header=header,
top_level_events_only=top_level_events_only
)
table.__doc__ = EventList.table.__doc__
def export_chrome_trace(self, path):
self._check_finish()
if kineto_available():
self.kineto_results.save(path) # type: ignore[union-attr]
else:
return self.function_events.export_chrome_trace(path) # type: ignore[union-attr]
export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__
def export_stacks(self, path: str, metric: str = "self_cpu_time_total"):
self._check_finish()
assert self.function_events is not None, "Expected profiling results"
assert self.with_stack, "export_stacks() requires with_stack=True"
return self.function_events.export_stacks(path, metric)
def key_averages(self, group_by_input_shape=False, group_by_stack_n=0):
self._check_finish()
assert self.function_events is not None, "Expected profiling results"
return self.function_events.key_averages(group_by_input_shape, group_by_stack_n)
key_averages.__doc__ = EventList.key_averages.__doc__
def total_average(self):
self._check_finish()
assert self.function_events is not None, "Expected profiling results"
return self.function_events.total_average()
total_average.__doc__ = EventList.total_average.__doc__
@property
def self_cpu_time_total(self):
""" Returns total time spent on CPU obtained as a sum of
all self times across all the events.
"""
self._check_finish()
assert self.function_events is not None
return self.function_events.self_cpu_time_total
def _parse_kineto_results(self, result):
# result.events() has most of the events - PyTorch op-level and device-level events
trace_start_us = result.trace_start_us()
mem_records = [[evt, False] for evt in result.events() if evt.name() == MEMORY_EVENT_NAME]
mem_records_acc = MemRecordsAcc(mem_records)
def _cpu_memory_usage(mem_record):
return mem_record.nbytes() if \
mem_record.device_type() in [DeviceType.CPU, DeviceType.MKLDNN, DeviceType.IDEEP] \
else 0
def _cuda_memory_usage(mem_record):
return mem_record.nbytes() if \
mem_record.device_type() in [DeviceType.CUDA, DeviceType.HIP] \
else 0
# Create and return FunctionEvent list
function_events = []
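# cuda_corr_map: linked correlation id -> events recorded with that id; filled
# while walking the Kineto events and used afterwards to attach device kernels
# to the launching CPU op and to re-parent CUDA-runtime events onto its thread.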
cuda_corr_map: Dict[int, List[FunctionEvent]] = {}
max_evt_id = 0
for kineto_event in result.events():
if _filter_name(kineto_event.name()):
continue
rel_start_us = kineto_event.start_us() - trace_start_us
rel_end_us = rel_start_us + kineto_event.duration_us()
abs_end_us = kineto_event.start_us() + kineto_event.duration_us()
cpu_memory_usage = 0
cuda_memory_usage = 0
if kineto_event.device_type() == DeviceType.CPU:
# find the corresponding memory allocation events
for mem_record in mem_records_acc.in_interval(kineto_event.start_us(), abs_end_us):
cpu_memory_usage += _cpu_memory_usage(mem_record[0])
cuda_memory_usage += _cuda_memory_usage(mem_record[0])
mem_record[1] = True
is_async = kineto_event.is_async() or (
kineto_event.start_thread_id() != kineto_event.end_thread_id()
)
fe = FunctionEvent(
id=kineto_event.correlation_id(),
name=_rewrite_name(name=kineto_event.name(), with_wildcard=True),
trace_name=_rewrite_name(name=kineto_event.name(), with_wildcard=False),
thread=kineto_event.start_thread_id(),
start_us=rel_start_us,
end_us=rel_end_us,
fwd_thread=kineto_event.fwd_thread_id(),
input_shapes=kineto_event.shapes(),
stack=[entry for entry in kineto_event.stack() if _filter_stack_entry(entry)],
scope=kineto_event.scope(),
cpu_memory_usage=cpu_memory_usage,
cuda_memory_usage=cuda_memory_usage,
is_async=is_async,
sequence_nr=kineto_event.sequence_nr(),
device_type=kineto_event.device_type(),
device_index=kineto_event.device_index(),
flops=kineto_event.flops(),
)
max_evt_id = fe.id if fe.id > max_evt_id else max_evt_id
if fe.device_type == DeviceType.CPU and not fe.is_async:
# Check if we have CUDA time as a fallback
cuda_time = kineto_event.cuda_elapsed_us()
if cuda_time > 0:
fe.append_kernel(
fe.name,
fe.device_index,
cuda_time)
fe.is_legacy = True
function_events.append(fe)
corr_id = kineto_event.linked_correlation_id()
if corr_id > 0:
if corr_id not in cuda_corr_map:
cuda_corr_map[corr_id] = []
cuda_corr_map[corr_id].append(fe)
# associate CUDA kernels and CUDA runtime (CPU) with CPU events
for fe in function_events:
if (fe.device_type == DeviceType.CPU and not fe.is_async and
fe.id in cuda_corr_map):
for f_evt in cuda_corr_map[fe.id]:
if f_evt.device_type == DeviceType.CUDA:
fe.append_kernel(
f_evt.name,
f_evt.device_index,
f_evt.time_range.end - f_evt.time_range.start)
elif f_evt.device_type == DeviceType.CPU:
# make sure that 'thread' of a CPU Kineto (e.g. CUDA Runtime) event is associated
# with the 'thread' of the corresponding linked PyTorch event to properly track
# parents and children
f_evt.thread = fe.thread
# output top-level memory events
for mem_record in mem_records:
if not mem_record[1]:
rel_start_us = mem_record[0].start_us() - trace_start_us
max_evt_id += 1
fe = FunctionEvent(
id=max_evt_id,
name=MEMORY_EVENT_NAME,
trace_name=None, # not outputting in the trace
thread=mem_record[0].start_thread_id(),
start_us=rel_start_us,
end_us=rel_start_us, # no duration
fwd_thread=mem_record[0].start_thread_id(),
input_shapes=[],
stack=[],
scope=0, # RecordScope::FUNCTION
cpu_memory_usage=_cpu_memory_usage(mem_record[0]),
cuda_memory_usage=_cuda_memory_usage(mem_record[0]),
is_async=False,
sequence_nr=-1,
device_type=DeviceType.CPU,
device_index=0,
)
function_events.append(fe)
function_events.sort(key=lambda evt: [evt.time_range.start, -evt.time_range.end])
return function_events
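# A minimal usage sketch for the profiler above (illustrative only; `model` and
# `inputs` are hypothetical placeholders): profile a region, aggregate with
# key_averages(), then export a Chrome trace viewable in chrome://tracing.
def _profile_usage_sketch(model, inputs):
    with profile(record_shapes=True, profile_memory=True) as prof:
        model(inputs).sum().backward()
    print(prof.key_averages(group_by_input_shape=True).table(
        sort_by="self_cpu_time_total", row_limit=10))
    prof.export_chrome_trace("trace.json")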
class record_function(ContextDecorator):
"""Context manager/function decorator that adds a label to a block of
Python code (or function) when running the autograd profiler. It is
useful for locating that block in the resulting profile.
Args:
name (str): Label assigned to the block of code.
args (str, optional): Optional arguments string recorded alongside the
label and passed through to the underlying RecordFunction.
Example:
>>> x = torch.randn((1, 1), requires_grad=True)
>>> with torch.autograd.profiler.profile() as prof:
... y = x ** 2
... with torch.autograd.profiler.record_function("label-z"): # label the block
... z = y ** 3
... y.backward()
...
>>> # NOTE: some columns were removed for brevity
>>> print(prof.key_averages().table(sort_by="self_cpu_time_total"))
----------------------------------- --------------- --------------- ---------------
Name Self CPU total % CPU time avg Number of Calls
----------------------------------- --------------- --------------- ---------------
pow 60.77% 47.470us 3
mul 21.73% 25.465us 2
PowBackward0 12.03% 121.891us 1
torch::autograd::AccumulateGrad 2.70% 6.324us 1
label-z 2.13% 12.421us 1
torch::autograd::GraphRoot 0.64% 1.503us 1
----------------------------------- --------------- --------------- ---------------
Self CPU time total: 234.344us
CUDA time total: 0.000us
"""
def __init__(self, name: str, args: Optional[str] = None):
self.name: str = name
self.args: Optional[str] = args
# Whether or not we should run record function's end callbacks when exiting.
self.run_callbacks_on_exit: bool = True
# Stores underlying RecordFunction as a tensor. TODO: move to custom
# class (https://github.com/pytorch/pytorch/issues/35026).
self.handle: torch.Tensor = torch.zeros(1)
def __enter__(self):
self.handle = torch.ops.profiler._record_function_enter(self.name, self.args)
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any):
if self.run_callbacks_on_exit:
torch.ops.profiler._record_function_exit(self.handle)
def _call_end_callbacks_on_future(self, fut: Future[Any]) -> Future[Any]:
"""
_call_end_callbacks_on_future is meant to be used for profiling async
calls that return a future. Calling this function will extend recording
beyond this scope, until the future is satisfied. It is useful for profiling
the end-to-end time of asynchronous calls. This function should only be called
once to attach the callback onto the future, and will throw if called multiple
times.
Args:
fut (torch._C.Future): future onto which the profiling end callbacks are scheduled.
Returns:
A future that completes with the value of the passed-in future once
the profiling callbacks have run.
"""
# Throw if we have already attached a callback onto the future.
if not self.run_callbacks_on_exit:
raise RuntimeError("_call_end_callbacks_on_future can only be called once.")
# We are scheduling to run this RecordFunction's end callbacks when the
# passed in future completes, so don't run end callbacks on exit.
self.run_callbacks_on_exit = False
profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut(self.handle, fut)
return profiled_future
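# A hedged sketch (illustrative only) of extending a record_function scope over
# an asynchronous call: it assumes torch.distributed.rpc has been initialized
# elsewhere and that "worker1" is a reachable peer. The returned future
# completes only after the profiling end callbacks have run.
def _record_function_async_sketch():
    from torch.distributed import rpc
    with record_function("rpc-block") as rf:
        fut = rpc.rpc_async("worker1", torch.add,
                            args=(torch.ones(2), torch.ones(2)))
        fut = rf._call_end_callbacks_on_future(fut)
    return fut.wait()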
class emit_nvtx(object):
"""Context manager that makes every autograd operation emit an NVTX range.
It is useful when running the program under nvprof::
nvprof --profile-from-start off -o trace_name.prof -- <regular command here>
Unfortunately, there's no way to force nvprof to flush the data it collected
to disk, so for CUDA profiling one has to use this context manager to annotate
nvprof traces and wait for the process to exit before inspecting them.
Then, either NVIDIA Visual Profiler (nvvp) can be used to visualize the timeline, or
:func:`torch.autograd.profiler.load_nvprof` can load the results for inspection
e.g. in Python REPL.
.. warning::
This context manager should not be called recursively, i.e. at most one
instance should be enabled at any given time.
Args:
enabled (bool, optional, default=True): Setting ``enabled=False`` makes this context manager a no-op.
Default: ``True``.
record_shapes (bool, optional, default=False): If ``record_shapes=True``, the nvtx range wrapping
each autograd op will append information about the sizes of Tensor arguments received
by that op, in the following format:
``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]``
Non-tensor arguments will be represented by ``[]``.
Arguments will be listed in the order they are received by the backend op.
Please note that this order may not match the order in which those arguments were passed
on the Python side. Also note that shape recording may increase the overhead of nvtx range creation.
Example:
>>> with torch.cuda.profiler.profile():
... model(x) # Warmup CUDA memory allocator and profiler
... with torch.autograd.profiler.emit_nvtx():
... model(x)
**Forward-backward correlation**
When viewing a profile created using :class:`emit_nvtx` in the Nvidia Visual Profiler,
correlating each backward-pass op with the corresponding forward-pass op can be difficult.
To ease this task, :class:`emit_nvtx` appends sequence number information to the ranges it
generates.
During the forward pass, each function range is decorated with ``seq=<N>``. ``seq`` is a running
counter, incremented each time a new backward Function object is created and stashed for backward.
Thus, the ``seq=<N>`` annotation associated with each forward function range tells you that
if a backward Function object is created by this forward function,
the backward object will receive sequence number N.
During the backward pass, the top-level range wrapping each C++ backward Function's
``apply()`` call is decorated with ``stashed seq=<M>``. ``M`` is the sequence number that
the backward object was created with. By comparing ``stashed seq`` numbers in backward with ``seq``
numbers in forward, you can track down which forward op created each backward Function.
Any functions executed during the backward pass are also decorated with ``seq=<N>``. During
default backward (with ``create_graph=False``) this information is irrelevant, and in fact,
``N`` may simply be 0 for all such functions. Only the top-level ranges associated with
backward Function objects' ``apply()`` methods are useful, as a way to correlate these Function
objects with the earlier forward pass.
**Double-backward**
If, on the other hand, a backward pass with ``create_graph=True`` is underway (in other words,
if you are setting up for a double-backward), each function's execution during backward
is given a nonzero, useful ``seq=<N>``. Those functions may themselves create Function objects
to be executed later during double-backward, just as the original functions in the forward pass did.
The relationship between backward and double-backward is conceptually the same as the relationship
between forward and backward: The functions still emit current-sequence-number-tagged ranges,
the Function objects they create still stash those sequence numbers, and during the eventual
double-backward, the Function objects' ``apply()`` ranges are still tagged with ``stashed seq``
numbers, which can be compared to `seq` numbers from the backward pass.
.. warning::
The sequence number is thread-local, and some forward functions don't create an associated
backward Function object (instead delegating that to sub-functions further down the call chain).
For these reasons, the correspondence of stashed sequence numbers in
backward Function ``apply()`` ranges with `seq` numbers in forward-pass ranges is
not guaranteed to be 1 to 1. The sequence numbers alone may not be enough to fully
disambiguate which forward function created which
backward Function object. You may need to make a judgment based on analytic knowledge of what
the expected correspondence should be.
"""
def __init__(self, enabled=True, record_shapes=False):
self.enabled = enabled
self.entered = False
self.record_shapes = record_shapes
def __enter__(self):
if not self.enabled:
return
if self.entered:
raise RuntimeError("NVTX annotation context manager is not reentrant")
self.entered = True
torch.cuda.synchronize()
_enable_profiler(
ProfilerConfig(
ProfilerState.NVTX,
self.record_shapes,
False,
False,
False,
False),
set()
)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enabled:
return
torch.cuda.synchronize()
_disable_profiler()
return False
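# A hedged sketch (illustrative only) of the double-backward case described in
# the emit_nvtx docstring: `model` and `x` are hypothetical placeholders, and
# the process is assumed to run under `nvprof --profile-from-start off`.
def _emit_nvtx_double_backward_sketch(model, x):
    with torch.cuda.profiler.profile():
        with emit_nvtx():
            loss = model(x).sum()
            grads = torch.autograd.grad(loss, list(model.parameters()),
                                        create_graph=True)
            sum(g.sum() for g in grads).backward()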
def load_nvprof(path):
"""Opens an nvprof trace file and parses autograd annotations.
Args:
path (str): path to nvprof trace
"""
return EventList(parse_nvprof_trace(path))
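# Usage sketch (illustrative): "trace_name.prof" is assumed to be the output of
# `nvprof --profile-from-start off -o trace_name.prof -- <command>`.
def _load_nvprof_sketch():
    events = load_nvprof("trace_name.prof")
    print(events.table(sort_by="cuda_time_total"))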
class EnforceUnique(object):
"""Raises an error if a key is seen more than once."""
def __init__(self):
self.seen = set()
def see(self, *key):
if key in self.seen:
raise RuntimeError('duplicate key: ' + str(key))
self.seen.add(key)
def parse_nvprof_trace(path):
import sqlite3
conn = sqlite3.connect(path)
conn.row_factory = sqlite3.Row
# Parse strings table
strings = {}
for r in conn.execute("SELECT _id_ as id, value FROM StringTable"):
strings[r["id"]] = torch._C._demangle(r["value"])
# First, find all functions and create FunctionEvents for them
marker_query = """
SELECT
start.id AS marker_id, start.name, start.timestamp AS start_time, end.timestamp AS end_time
FROM
CUPTI_ACTIVITY_KIND_MARKER AS start INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
ON start.id = end.id
WHERE
start.name != 0 AND end.name = 0
"""
functions = []
functions_map = {}
unique = EnforceUnique()
for row in conn.execute(marker_query):
unique.see(row['marker_id'])
evt = FunctionEvent(id=row['marker_id'],
node_id=0,  # nvprof traces carry no node id; 0 keeps FunctionEvent construction valid
name=strings[row['name']],
start_us=row['start_time'],
end_us=row['end_time'],
thread=0) # TODO: find in sqlite database
functions.append(evt)
functions_map[evt.id] = evt
# Now, correlate all kernels with FunctionEvents
kernel_query = """
SELECT
start.id AS marker_id, start.name, start.timestamp, end.timestamp,
runtime._id_ AS runtime_id, runtime.cbid, runtime.start AS runtime_start, runtime.end AS runtime_end,
kernel.start AS kernel_start, kernel.end AS kernel_end, kernel.name AS kernel_name
FROM
CUPTI_ACTIVITY_KIND_MARKER AS start
INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
ON start.id = end.id
INNER JOIN CUPTI_ACTIVITY_KIND_RUNTIME as runtime
ON (start.timestamp < runtime.start AND runtime.end < end.timestamp)
INNER JOIN CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL AS kernel
ON kernel.correlationId = runtime.correlationId
"""
unique = EnforceUnique()
for row in conn.execute(kernel_query):
unique.see(row['marker_id'], row['runtime_id'])
# 211 is cudaKernelLaunch for cuda >= 9.2
assert (row['cbid'] == 211)
evt = functions_map[row['marker_id']]
evt.append_kernel(row['kernel_name'],
0,
row['kernel_end'] - row['kernel_start'])
functions.sort(key=lambda evt: evt.time_range.start)
return functions
| null | null | null | null | null | null | null | null | null |
[] | 9,615 |
tests/validation/tests/v1_api/test_deployment.py
|
ursinnDev/rancher_rancher
| 18,697 |
9768808
|
from .common import * # NOQA
import pytest
namespace = {"client": None, "ns": None}
def test_namespace_create():
template = read_yaml_from_resource_dir("namespace.yaml")
template["metadata"]["name"] = random_test_name()
client = namespace["client"]
res = client.create_namespace(template)
# validate the namespace is created
ns = client.by_id_namespace(res.id)
assert ns.id == res.id
# delete the namespace at the end
client.delete(ns)
def test_deployment():
client = namespace["client"]
ns = namespace["ns"]
template = read_json_from_resource_dir("deployment_1.json")
name = random_name()
# set name
template["metadata"]["name"] = name
# set namespace
template["metadata"]["namespace"] = ns.id
# set container image and name
template["spec"]["template"]["spec"]["containers"][0]["image"] = TEST_IMAGE_V1
template["spec"]["template"]["spec"]["containers"][0]["name"] = name
# set label and selector
label_value = "apps.deployment-{}-{}".format(ns.id, name)
labels = template["spec"]["template"]["metadata"]["labels"]
labels["workload.user.cattle.io/workloadselector"] = label_value
matches = template["spec"]["selector"]["matchLabels"]
matches["workload.user.cattle.io/workloadselector"] = label_value
deployment = client.create_apps_deployment(template)
deployment = validate_deployment(client, deployment)
# scale up to 5 pods
deployment.spec.replicas = 5
deployment = client.update(deployment, deployment)
deployment = validate_deployment(client, deployment)
client.delete(deployment)
def validate_deployment(client, deployment):
# wait for the deployment to be active
wait_for(lambda: client.reload(deployment).metadata.state.name == "active",
timeout_message="time out waiting for deployment to be ready")
res = client.reload(deployment)
name = res["metadata"]["name"]
namespace = res["metadata"]["namespace"]
replicas = res["spec"]["replicas"]
# Rancher Dashboard gets pods by passing the label selector
target_label = 'workload.user.cattle.io/workloadselector=apps.deployment-{}-{}'
pods = client.list_pod(
labelSelector=target_label.format(namespace, name))
assert "data" in pods.keys(), "failed to get pods"
assert len(pods.data) == replicas, "failed to get the right number of pods"
for pod in pods.data:
assert pod.metadata.state.name == "running"
return res
@pytest.fixture(scope='module', autouse=True)
def create_client(request):
client = get_cluster_client_for_token_v1()
template = read_yaml_from_resource_dir("namespace.yaml")
template["metadata"]["name"] = random_test_name()
ns = client.create_namespace(template)
namespace["client"] = client
namespace["ns"] = ns
def fin():
client.delete(namespace["ns"])
request.addfinalizer(fin)
| null | null | null | null | null | null | null | null | null |
[] | 6,232 |
tests/components/motion_blinds/__init__.py
|
tbarbette/core
| 30,023 |
8241861
|
<reponame>tbarbette/core
"""Tests for the Motion Blinds integration."""
|
0
|
0
|
0.0
|
0
|
0
|
0
|
1.0
|
0
|
0
|
[] | 9,202 |
posthog/settings/cloud.py
|
dorucioclea/posthog
| 7,409 |
9633711
|
# Overridden in posthog-cloud
import sys
from posthog.settings.utils import get_from_env, print_warning, str_to_bool
# Early exit to avoid issues with cloud not being properly included
if get_from_env("MULTI_TENANCY", False, type_cast=str_to_bool):
print_warning(("Environment variable MULTI_TENANCY is set, but cloud settings have not been included",))
sys.exit("[ERROR] Stopping Django server…\n")
| null | null | null | null | null | null | null | null | null |
[] | 5,668 |
xirl/xirl/evaluators/reward_visualizer.py
|
xxdreck/google-research
| 23,901 |
10173439
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reward visualizer."""
from .base import Evaluator
from .base import EvaluatorOutput
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.distance import cdist
class RewardVisualizer(Evaluator):
"""Distance to goal state visualizer."""
def __init__(self, distance, num_plots):
"""Constructor.
Args:
distance: The distance metric to use when calculating nearest-neighbours.
num_plots: The number of reward plots to display.
Raises:
ValueError: If the distance metric is invalid.
"""
super().__init__(inter_class=False)
if distance not in ["sqeuclidean", "cosine"]:
raise ValueError(
"{} is not a supported distance metric.".format(distance))
# For plotting, we don't want to display squared euclidean distances so we
# will override to `euclidean` if it was selected.
if distance == "sqeuclidean":
distance = "euclidean"
self.distance = distance
self.num_plots = num_plots
def _gen_reward_plot(self, rewards):
"""Create a pyplot plot and save to buffer."""
fig, axes = plt.subplots(1, len(rewards), figsize=(6.4 * len(rewards), 4.8))
if len(rewards) == 1:
axes = [axes]
for i, rew in enumerate(rewards):
axes[i].plot(rew)
fig.text(0.5, 0.04, "Timestep", ha="center")
fig.text(0.04, 0.5, "Reward", va="center", rotation="vertical")
fig.canvas.draw()
img_arr = np.array(fig.canvas.renderer.buffer_rgba())[:, :, :3]
plt.close()
return img_arr
def _compute_goal_emb(self, embs):
"""Compute the mean of all last frame embeddings."""
goal_emb = [emb[-1, :] for emb in embs]
goal_emb = np.stack(goal_emb, axis=0)
goal_emb = np.mean(goal_emb, axis=0, keepdims=True)
return goal_emb
def evaluate(self, outs):
embs = [o.embs for o in outs]
goal_emb = self._compute_goal_emb(embs)
# Make sure we sample only as many as are available.
num_plots = min(len(embs), self.num_plots)
rand_idxs = np.random.choice(
np.arange(len(embs)), size=num_plots, replace=False)
# Compute rewards as distances to the goal embedding.
rewards = []
for idx in rand_idxs:
emb = embs[idx]
dists = cdist(emb, goal_emb, self.distance)
rewards.append(-dists)
image = self._gen_reward_plot(rewards)
return EvaluatorOutput(image=image)
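# A hedged usage sketch (illustrative only): assumes `outs` is a list of
# evaluator outputs exposing an `embs` array of shape [num_frames, embedding_dim],
# as produced elsewhere in this codebase.
def _reward_visualizer_sketch(outs):
    visualizer = RewardVisualizer(distance="cosine", num_plots=2)
    result = visualizer.evaluate(outs)
    plt.imshow(result.image)  # reward-vs-timestep panels rendered by the evaluator
    plt.show()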
| null | null | null | null | null | null | null | null | null |
[] | 11,923 |
python/paddle/fluid/tests/unittests/test_conv2d_transpose_op_depthwise_conv.py
|
zmxdream/Paddle
| 17,085 |
10185976
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
paddle.enable_static()
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test import OpTest
from test_conv2d_transpose_op import TestConv2DTransposeOp
class TestDepthwiseConvTranspose(TestConv2DTransposeOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.dilations = [1, 1]
self.input_size = [1, 8, 4, 4] # NCHW
self.groups = 8
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
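# e.g. input_size [1, 8, 4, 4] with groups=8 gives f_c == 1, so filter_size below is [8, 1, 4, 4]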
self.filter_size = [self.input_size[1], f_c, 4, 4]
self.op_type = "depthwise_conv2d_transpose"
class TestDepthwiseConvTransposeAsymmetricPad(TestConv2DTransposeOp):
def init_test_case(self):
self.pad = [1, 1, 1, 2]
self.stride = [1, 1]
self.dilations = [1, 1]
self.input_size = [1, 8, 4, 4] # NCHW
self.groups = 8
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [self.input_size[1], f_c, 3, 3]
self.op_type = "depthwise_conv2d_transpose"
self.data_format = 'NCHW'
class TestDepthwiseConvTransposeSAMEPad(TestConv2DTransposeOp):
def init_test_case(self):
self.stride = [1, 1]
self.dilations = [1, 1]
self.input_size = [1, 8, 4, 4]  # NCHW
self.groups = 8
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [self.input_size[1], f_c, 3, 3]
self.op_type = "depthwise_conv2d_transpose"
self.padding_algorithm = 'SAME'
class TestDepthwiseConvTransposeVALIDPad(TestConv2DTransposeOp):
def init_test_case(self):
self.stride = [1, 1]
self.dilations = [1, 1]
self.input_size = [1, 8, 4, 4]  # NCHW
self.groups = 8
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [self.input_size[1], f_c, 3, 3]
self.op_type = "depthwise_conv2d_transpose"
self.padding_algorithm = 'VALID'
class TestDepthwiseConvTranspose_NHWC_3x3kernel(TestConv2DTransposeOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.dilations = [1, 1]
self.input_size = [1, 4, 4, 8] # NHWC
self.groups = 8
assert np.mod(self.input_size[3], self.groups) == 0
f_c = self.input_size[3] // self.groups
self.filter_size = [self.input_size[3], f_c, 3, 3]
self.op_type = "depthwise_conv2d_transpose"
self.data_format = 'NHWC'
if __name__ == '__main__':
unittest.main()
| null | null | null | null | null | null | null | null | null |
[] | 12,003 |
homeassistant/components/unifiprotect/light.py
|
liangleslie/core
| 30,023 |
515251
|
<gh_stars>1000+
"""This component provides Lights for UniFi Protect."""
from __future__ import annotations
import logging
from typing import Any
from pyunifiprotect.data import Light
from homeassistant.components.light import ATTR_BRIGHTNESS, ColorMode, LightEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .data import ProtectData
from .entity import ProtectDeviceEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up lights for UniFi Protect integration."""
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
entities = [
ProtectLight(
data,
device,
)
for device in data.api.bootstrap.lights.values()
]
if not entities:
return
async_add_entities(entities)
def unifi_brightness_to_hass(value: int) -> int:
"""Convert unifi brightness 1..6 to hass format 0..255."""
return min(255, round((value / 6) * 255))
def hass_to_unifi_brightness(value: int) -> int:
"""Convert hass brightness 0..255 to unifi 1..6 scale."""
return max(1, round((value / 255) * 6))
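# Worked examples for the conversions above:
#   unifi_brightness_to_hass(6) == 255, unifi_brightness_to_hass(3) == 128
#   hass_to_unifi_brightness(255) == 6, hass_to_unifi_brightness(1) == 1 (clamped to the 1..6 range)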
class ProtectLight(ProtectDeviceEntity, LightEntity):
"""A Ubiquiti UniFi Protect Light Entity."""
device: Light
_attr_icon = "mdi:spotlight-beam"
_attr_color_mode = ColorMode.BRIGHTNESS
_attr_supported_color_modes = {ColorMode.BRIGHTNESS}
@callback
def _async_update_device_from_protect(self) -> None:
super()._async_update_device_from_protect()
self._attr_is_on = self.device.is_light_on
self._attr_brightness = unifi_brightness_to_hass(
self.device.light_device_settings.led_level
)
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the light on."""
hass_brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
unifi_brightness = hass_to_unifi_brightness(hass_brightness)
_LOGGER.debug("Turning on light with brightness %s", unifi_brightness)
await self.device.set_light(True, unifi_brightness)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the light off."""
_LOGGER.debug("Turning off light")
await self.device.set_light(False)
|
0
|
0
|
0.0
|
1
|
0
|
0
|
1.0
|
0
|
7
|
[] | 10,391 |
tests_django/test_settings.py
|
gugux289/chatterbot
| 13,200 |
10102852
|
<gh_stars>1000+
"""
Django settings for when tests are run.
"""
import os
from chatterbot import constants
DEBUG = True
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'fake-key'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'chatterbot.ext.django_chatterbot',
'tests_django',
]
CHATTERBOT = {
'name': 'Test Django ChatterBot',
'logic_adapters': [
{
'import_path': 'chatterbot.logic.BestMatch',
},
{
'import_path': 'chatterbot.logic.MathematicalEvaluation',
}
],
'storage_adapter': 'chatterbot.storage.DjangoStorageAdapter',
'django_app_name': constants.DEFAULT_DJANGO_APP_NAME,
'initialize': False
}
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Using the MD5 password hasher improves test performance
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
USE_TZ = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
},
}
| null | null | null | null | null | null | null | null | null |
[] | 11,633 |
configs/paa/paa_r50_fpn_1.5x_coco.py
|
evgps/mmdetection_trashcan
| 20,190 |
11287256
|
<gh_stars>1000+
_base_ = './paa_r50_fpn_1x_coco.py'
lr_config = dict(step=[12, 16])
runner = dict(type='EpochBasedRunner', max_epochs=18)
| null | null | null | null | null | null | null | null | null |
[] | 6,811 |
homeassistant/components/fastdotcom/sensor.py
|
MrDelik/core
| 30,023 |
405011
|
<reponame>MrDelik/core
"""Support for Fast.com internet speed testing sensor."""
from __future__ import annotations
from typing import Any
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import DATA_RATE_MEGABITS_PER_SECOND
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import DATA_UPDATED, DOMAIN as FASTDOTCOM_DOMAIN
ICON = "mdi:speedometer"
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Fast.com sensor."""
async_add_entities([SpeedtestSensor(hass.data[FASTDOTCOM_DOMAIN])])
class SpeedtestSensor(RestoreEntity, SensorEntity):
"""Implementation of a FAst.com sensor."""
_attr_name = "Fast.com Download"
_attr_native_unit_of_measurement = DATA_RATE_MEGABITS_PER_SECOND
_attr_icon = ICON
_attr_should_poll = False
_attr_native_value = None
def __init__(self, speedtest_data: dict[str, Any]) -> None:
"""Initialize the sensor."""
self._speedtest_data = speedtest_data
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass, DATA_UPDATED, self._schedule_immediate_update
)
)
if not (state := await self.async_get_last_state()):
return
self._attr_native_value = state.state
def update(self) -> None:
"""Get the latest data and update the states."""
if (data := self._speedtest_data.data) is None: # type: ignore[attr-defined]
return
self._attr_native_value = data["download"]
@callback
def _schedule_immediate_update(self) -> None:
self.async_schedule_update_ha_state(True)
| null | null | null | null | null | null | null | null | null |
[] | 10,193 |
tests/nlu/featurizers/test_regex_featurizer.py
|
Next-Trends/rasa
| 3,603 |
6565391
|
<gh_stars>1000+
from typing import Text, List, Any, Tuple, Callable, Dict, Optional
import dataclasses
import numpy as np
import pytest
from rasa.engine.graph import ExecutionContext
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.constants import SPACY_DOCS, TOKENS_NAMES
from rasa.shared.nlu.constants import TEXT, INTENT, RESPONSE
from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer
@pytest.fixture()
def resource() -> Resource:
return Resource("regex_featurizer")
@pytest.fixture()
def create_featurizer(
default_model_storage: ModelStorage,
default_execution_context: ExecutionContext,
resource: Resource,
) -> Callable[..., RegexFeaturizer]:
def inner(
config: Dict[Text, Any] = None,
known_patterns: Optional[List[Dict[Text, Any]]] = None,
) -> RegexFeaturizer:
config = config or {}
return RegexFeaturizer(
{**RegexFeaturizer.get_default_config(), **config},
default_model_storage,
resource,
default_execution_context,
known_patterns,
)
return inner
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features,"
"labeled_tokens",
[
(
"hey how are you today",
[
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
],
[0.0, 1.0, 0.0],
[0],
),
(
"hey 456 how are you",
[
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
],
[1.0, 1.0, 0.0],
[1, 0],
),
(
"blah balh random eh",
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[0.0, 0.0, 0.0],
[],
),
(
"a 1 digit number",
[[0.0, 0.0, 0.0], [1.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[1.0, 0.0, 1.0],
[1, 1],
),
],
)
def test_regex_featurizer(
sentence: Text,
expected_sequence_features: List[float],
expected_sentence_features: List[float],
labeled_tokens: List[int],
spacy_nlp: Any,
create_featurizer: Callable[..., RegexFeaturizer],
spacy_tokenizer: SpacyTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
ftr = create_featurizer(known_patterns=patterns)
# adds tokens to the message
message = Message(data={TEXT: sentence, RESPONSE: sentence})
message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
spacy_tokenizer.process([message])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray(), expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray(), expected_sentence_features, atol=1e-10
)
# the tokenizer should have added tokens
assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0
# the number of regex matches on each token should match
for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
token_matches = token.get("pattern").values()
num_matches = sum(token_matches)
assert num_matches == labeled_tokens.count(i)
@pytest.mark.parametrize(
"sentence, tokens, expected_sequence_features, expected_sentence_features,"
"labeled_tokens",
[
(
"明天上海的天气怎么样?",
[("明天", 0), ("上海", 2), ("的", 4), ("天气", 5), ("怎么样", 7), ("?", 10)],
[[0.0, 1.0], [1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
[1.0, 1.0],
[0.0, 1.0],
),
(
"北京的天气如何?",
[("北京", 0), ("的", 2), ("天气", 3), ("如何", 5), ("?", 7)],
[[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
[1.0, 0.0],
[0.0],
),
(
"昨天和今天的天气都不错",
[("昨天", 0), ("和", 2), ("今天", 3), ("的", 5), ("天气", 6), ("都", 8), ("不错", 9)],
[
[0.0, 1.0],
[0.0, 0.0],
[0.0, 1.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
],
[0.0, 1.0],
[0.0, 2.0],
),
(
"后天呢?",
[("后天", 0), ("呢", 2), ("?", 3)],
[[0.0, 1.0], [0.0, 0.0], [0.0, 0.0]],
[0.0, 1.0],
[0.0],
),
],
)
def test_lookup_tables_without_use_word_boundaries(
sentence: Text,
tokens: List[Tuple[Text, float]],
expected_sequence_features: List[float],
expected_sentence_features: List[float],
labeled_tokens: List[float],
create_featurizer: Callable[..., RegexFeaturizer],
):
from rasa.nlu.tokenizers.tokenizer import Token
lookups = [
{"name": "cites", "elements": ["北京", "上海", "广州", "深圳", "杭州"]},
{"name": "dates", "elements": ["昨天", "今天", "明天", "后天"]},
]
ftr = create_featurizer({"use_word_boundaries": False})
training_data = TrainingData()
training_data.lookup_tables = lookups
ftr.train(training_data)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set(TOKENS_NAMES[TEXT], [Token(word, start) for (word, start) in tokens])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray(), expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray(), expected_sentence_features, atol=1e-10
)
# the number of regex matches on each token should match
for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
token_matches = token.get("pattern").values()
num_matches = sum(token_matches)
assert num_matches == labeled_tokens.count(i)
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features, "
"labeled_tokens",
[
(
"lemonade and mapo tofu",
[[1.0, 0.0], [0.0, 0.0], [0.0, 1.0], [0.0, 1.0]],
[1.0, 1.0],
[0.0, 2.0, 3.0],
),
(
"a cup of tea",
[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [1.0, 0.0]],
[1.0, 0.0],
[3.0],
),
(
"Is burrito my favorite food?",
[[0.0, 0.0], [0.0, 1.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
[0.0, 1.0],
[1.0],
),
("I want club?mate", [[0.0, 0.0], [0.0, 0.0], [1.0, 0.0]], [1.0, 0.0], [2.0]),
],
)
def test_lookup_tables(
sentence: Text,
expected_sequence_features: List[float],
expected_sentence_features: List[float],
labeled_tokens: List[float],
spacy_nlp: Any,
spacy_tokenizer: SpacyTokenizer,
create_featurizer: Callable[..., RegexFeaturizer],
):
lookups = [
{
"name": "drinks",
"elements": ["mojito", "lemonade", "sweet berry wine", "tea", "club?mate"],
},
{"name": "plates", "elements": "data/test/lookup_tables/plates.txt"},
]
ftr = create_featurizer()
training_data = TrainingData()
training_data.lookup_tables = lookups
ftr.train(training_data)
ftr.process_training_data(training_data)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set("text_spacy_doc", spacy_nlp(sentence))
spacy_tokenizer.process([message])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray(), expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray(), expected_sentence_features, atol=1e-10
)
# the tokenizer should have added tokens
assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0
# the number of regex matches on each token should match
for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
token_matches = token.get("pattern").values()
num_matches = sum(token_matches)
assert num_matches == labeled_tokens.count(i)
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features",
[
("hey how are you today", [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]),
("hey 456 how are you", [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]),
("blah balh random eh", [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]),
("a 1 digit number", [0.0, 0.0, 0.0], [1.0, 0.0, 1.0]),
],
)
def test_regex_featurizer_no_sequence(
sentence: Text,
expected_sequence_features: List[float],
expected_sentence_features: List[float],
spacy_nlp: Any,
create_featurizer: Callable[..., RegexFeaturizer],
spacy_tokenizer: SpacyTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
ftr = create_featurizer(known_patterns=patterns)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
spacy_tokenizer.process([message])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray()[0], expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray()[-1], expected_sentence_features, atol=1e-10
)
def test_regex_featurizer_train(
create_featurizer: Callable[..., RegexFeaturizer],
whitespace_tokenizer: WhitespaceTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
featurizer = create_featurizer()
sentence = "hey how are you today 19.12.2019 ?"
message = Message(data={TEXT: sentence})
message.set(RESPONSE, sentence)
message.set(INTENT, "intent")
whitespace_tokenizer.process_training_data(TrainingData([message]))
training_data = TrainingData([message], regex_features=patterns)
featurizer.train(training_data)
featurizer.process_training_data(training_data)
expected = np.array([0, 1, 0])
expected_cls = np.array([1, 1, 1])
seq_vecs, sen_vec = message.get_sparse_features(TEXT, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert (6, 3) == seq_vecs.shape
assert (1, 3) == sen_vec.shape
assert np.all(seq_vecs.toarray()[0] == expected)
assert np.all(sen_vec.toarray()[-1] == expected_cls)
seq_vecs, sen_vec = message.get_sparse_features(RESPONSE, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert (6, 3) == seq_vecs.shape
assert (1, 3) == sen_vec.shape
assert np.all(seq_vecs.toarray()[0] == expected)
assert np.all(sen_vec.toarray()[-1] == expected_cls)
seq_vecs, sen_vec = message.get_sparse_features(INTENT, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert seq_vecs is None
assert sen_vec is None
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features,"
"case_sensitive",
[
("Hey How are you today", [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], True),
("Hey How are you today", [0.0, 1.0, 0.0], [0.0, 1.0, 0.0], False),
("Hey 456 How are you", [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], True),
("Hey 456 How are you", [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], False),
],
)
def test_regex_featurizer_case_sensitive(
sentence: Text,
expected_sequence_features: List[float],
expected_sentence_features: List[float],
case_sensitive: bool,
spacy_nlp: Any,
create_featurizer: Callable[..., RegexFeaturizer],
spacy_tokenizer: SpacyTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
ftr = create_featurizer({"case_sensitive": case_sensitive}, known_patterns=patterns)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
spacy_tokenizer.process([message])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray()[0], expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray()[-1], expected_sentence_features, atol=1e-10
)
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features,"
"labeled_tokens, use_word_boundaries",
[
("how are you", [[1.0], [0.0], [0.0]], [1.0], [0.0], True),
("how are you", [[1.0], [0.0], [0.0]], [1.0], [0.0], False),
("Take a shower", [[0.0], [0.0], [0.0]], [0.0], [], True),
("Take a shower", [[0.0], [0.0], [1.0]], [1.0], [2.0], False),
("What a show", [[0.0], [0.0], [0.0]], [0.0], [], True),
("What a show", [[0.0], [0.0], [1.0]], [1.0], [2.0], False),
("The wolf howled", [[0.0], [0.0], [0.0]], [0.0], [], True),
("The wolf howled", [[0.0], [0.0], [1.0]], [1.0], [2.0], False),
],
)
def test_lookup_with_and_without_boundaries(
sentence: Text,
expected_sequence_features: List[List[float]],
expected_sentence_features: List[float],
labeled_tokens: List[float],
use_word_boundaries: bool,
spacy_nlp: Any,
create_featurizer: Callable[..., RegexFeaturizer],
spacy_tokenizer: SpacyTokenizer,
):
ftr = create_featurizer({"use_word_boundaries": use_word_boundaries})
training_data = TrainingData()
# we use lookups because the "use_word_boundaries" flag is only used when
# producing patterns from lookup tables
lookups = [{"name": "how", "elements": ["how"]}]
training_data.lookup_tables = lookups
ftr.train(training_data)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
spacy_tokenizer.process([message])
(sequence_features, sentence_features) = ftr._features_for_patterns(message, TEXT)
sequence_features = sequence_features.toarray()
sentence_features = sentence_features.toarray()
num_of_patterns = sum([len(lookup["elements"]) for lookup in lookups])
assert sequence_features.shape == (
len(message.get(TOKENS_NAMES[TEXT])),
num_of_patterns,
)
num_of_lookup_tables = len(lookups)
assert sentence_features.shape == (num_of_lookup_tables, num_of_patterns)
# sequence_features should be {0,1} for each token: 1 if match, 0 if not
assert np.allclose(sequence_features, expected_sequence_features, atol=1e-10)
# sentence_features should be {0,1} for each lookup table: 1 if sentence
# contains match from that table, 0 if not
assert np.allclose(sentence_features, expected_sentence_features, atol=1e-10)
# the tokenizer should have added tokens
assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0
# the number of regex matches on each token should match
for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
token_matches = token.get("pattern").values()
num_matches = sum(token_matches)
# labeled_tokens should list the token(s) which match a pattern
assert num_matches == labeled_tokens.count(i)
def test_persist_load_for_finetuning(
create_featurizer: Callable[..., RegexFeaturizer],
default_model_storage: ModelStorage,
default_execution_context: ExecutionContext,
resource: Resource,
whitespace_tokenizer: WhitespaceTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
featurizer = create_featurizer()
sentence = "hey how are you today 19.12.2019 ?"
message = Message(data={TEXT: sentence})
message.set(RESPONSE, sentence)
message.set(INTENT, "intent")
training_data = TrainingData([message], regex_features=patterns)
whitespace_tokenizer.process_training_data(training_data)
featurizer.train(training_data)
loaded_featurizer = RegexFeaturizer.load(
RegexFeaturizer.get_default_config(),
default_model_storage,
resource,
dataclasses.replace(default_execution_context, is_finetuning=True),
)
# Test component loaded in finetune mode and also with
# same patterns as before and vocabulary statistics
assert loaded_featurizer.known_patterns == featurizer.known_patterns
assert loaded_featurizer.finetune_mode
new_lookups = [{"name": "plates", "elements": "data/test/lookup_tables/plates.txt"}]
training_data = TrainingData()
training_data.lookup_tables = new_lookups
loaded_featurizer.train(training_data)
# Test merging of a new pattern to an already trained component.
assert len(loaded_featurizer.known_patterns) == 4
def test_vocabulary_expand_for_finetuning(
create_featurizer: Callable[..., RegexFeaturizer],
default_model_storage: ModelStorage,
resource: Resource,
default_execution_context: ExecutionContext,
whitespace_tokenizer: WhitespaceTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
]
featurizer = create_featurizer()
sentence = "hey hey 2020"
message = Message(data={TEXT: sentence})
message.set(RESPONSE, sentence)
message.set(INTENT, "intent")
training_data = TrainingData([message], regex_features=patterns)
whitespace_tokenizer.process_training_data(training_data)
featurizer.train(training_data)
featurizer.process_training_data(training_data)
# Test featurization of message
expected = np.array([1, 0])
expected_cls = np.array([1, 1])
seq_vecs, sen_vec = message.get_sparse_features(TEXT, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert (3, 2) == seq_vecs.shape
assert (1, 2) == sen_vec.shape
assert np.all(seq_vecs.toarray()[0] == expected)
assert np.all(sen_vec.toarray()[-1] == expected_cls)
loaded_featurizer = RegexFeaturizer.load(
RegexFeaturizer.get_default_config(),
default_model_storage,
resource,
dataclasses.replace(default_execution_context, is_finetuning=True),
)
new_patterns = [
{"pattern": "\\btoday*", "name": "day", "usage": "intent"},
{"pattern": "\\bhey+", "name": "hello", "usage": "intent"},
]
new_sentence = "hey today"
message = Message(data={TEXT: new_sentence})
message.set(RESPONSE, new_sentence)
message.set(INTENT, "intent")
new_training_data = TrainingData([message], regex_features=patterns + new_patterns)
whitespace_tokenizer.process_training_data(new_training_data)
loaded_featurizer.train(new_training_data)
loaded_featurizer.process_training_data(new_training_data)
# Test featurization of message, this time for the extra pattern as well.
expected_token_1 = np.array([1, 0, 0])
expected_token_2 = np.array([0, 0, 1])
expected_cls = np.array([1, 0, 1])
seq_vecs, sen_vec = message.get_sparse_features(TEXT, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert (2, 3) == seq_vecs.shape
assert (1, 3) == sen_vec.shape
assert np.all(seq_vecs.toarray()[0] == expected_token_1)
assert np.all(seq_vecs.toarray()[1] == expected_token_2)
assert np.all(sen_vec.toarray()[-1] == expected_cls)
# let's check if the order of patterns is preserved
for old_index, pattern in enumerate(featurizer.known_patterns):
assert pattern["name"] == loaded_featurizer.known_patterns[old_index]["name"]
# we also modified a pattern, check if that is correctly modified
pattern_to_check = [
pattern
for pattern in loaded_featurizer.known_patterns
if pattern["name"] == "hello"
]
assert pattern_to_check == [new_patterns[1]]
| null | null | null | null | null | null | null | null | null |
[] | 4,471 |
tests/components/eafm/__init__.py
|
tbarbette/core
| 30,023 |
3327514
|
"""Tests for eafm."""
|
0
|
0
|
0.0
|
0
|
0
|
0
|
1.0
|
0
|
0
|
[] | 2,608 |
OpenCorePkg/Debug/Scripts/gdb_uefi.py
|
CEOALT1/RefindPlusUDK
| 10,125 |
11351705
|
<gh_stars>1000+
"""
Allows loading TianoCore symbols into a GDB session attached to EFI
Firmware.
This is how it works: build GdbSyms - it's a dummy binary that
contains the relevant symbols needed to find and load image symbols.
$ gdb /path/to/GdbSyms.dll
(gdb) target remote ....
(gdb) source Scripts/gdb_uefi.py
(gdb) reload-uefi -o /path/to/GdbSyms.dll
N.B.: GDB for certain targets behaves strangely when run without any
binary - e.g. it may assume a particular physical address space size
and endianness. To avoid this madness and seeing strange bugs, make
sure to pass /path/to/GdbSyms.dll when starting gdb.
The -o option should be used if you're debugging EFI where the PE
images were converted from MACH-O or ELF binaries.
"""
import array
import getopt
import binascii
import re
import sys
import os
import subprocess
sys.path.append(os.path.dirname(__file__))
from common_uefi import *
__license__ = "BSD"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Works"
if sys.version_info > (3,):
long = int
class ReloadUefi (gdb.Command):
"""Reload UEFI symbols"""
#
# Various constants.
#
EINVAL = 0xffffffff
CV_NB10 = 0x3031424E
CV_RSDS = 0x53445352
CV_MTOC = 0x434F544D
DOS_MAGIC = 0x5A4D
PE32PLUS_MAGIC = 0x20b
EST_SIGNATURE = 0x5453595320494249
DEBUG_GUID = [0x49152E77, 0x1ADA, 0x4764,
[0xB7,0xA2,0x7A,0xFE,
0xFE,0xD9,0x5E, 0x8B]]
DEBUG_IS_UPDATING = 0x1
#
# If the images were built as ELF/MACH-O and then converted to PE,
# then the base address needs to be offset by PE headers.
#
offset_by_headers = False
def __init__ (self):
super (ReloadUefi, self).__init__ ("reload-uefi", gdb.COMMAND_OBSCURE)
#
# Returns gdb.Type for a type.
#
def type (self, typename):
return gdb.lookup_type (typename)
#
# Returns gdb.Type for a pointer to a type.
#
def ptype (self, typename):
return gdb.lookup_type (typename).pointer ()
#
# Computes CRC32 on an array of data.
#
def crc32 (self, data):
return binascii.crc32 (data) & 0xFFFFFFFF
#
# Sets a field in a struct to a value, i.e.
# value->field_name = data.
#
# Newer Py bindings to Gdb provide access to the inferior
# memory, but not all, so have to do it this awkward way.
#
def set_field (self, value, field_name, data):
gdb.execute ("set *(%s *) 0x%x = 0x%x" % \
(str (value[field_name].type), \
long (value[field_name].address), data))
#
# Returns data backing a gdb.Value as an array.
# Same comment as above regarding newer Py bindings...
#
def value_data (self, value, bytes=0):
value_address = gdb.Value (value.address)
array_t = self.ptype ('UINT8')
value_array = value_address.cast (array_t)
if bytes == 0:
bytes = value.type.sizeof
data = array.array ('B')
for i in range (0, bytes):
data.append (value_array[i])
return data
#
# Locates the EFI_SYSTEM_TABLE as per UEFI spec 17.4.
# Returns base address or -1.
#
def search_est (self):
address = 0
estp_t = self.ptype ('EFI_SYSTEM_TABLE_POINTER')
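# Scan the first 4 GiB in 4 MiB steps (the spec places EFI_SYSTEM_TABLE_POINTER
# on a 4 MiB boundary) and verify each candidate by recomputing the CRC32 with
# the Crc32 field temporarily zeroed.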
while True:
try:
estp = gdb.Value(address).cast(estp_t)
if estp['Signature'] == self.EST_SIGNATURE:
oldcrc = long(estp['Crc32'])
self.set_field (estp, 'Crc32', 0)
newcrc = self.crc32 (self.value_data (estp.dereference (), 0))
self.set_field (estp, 'Crc32', long(oldcrc))
if newcrc == oldcrc:
print('EFI_SYSTEM_TABLE_POINTER @ 0x%x' % address)
return estp['EfiSystemTableBase']
except gdb.MemoryError:
pass
address += 4 * 2**20
if address >= 2**32:
return self.EINVAL
#
# Searches for a vendor-specific configuration table (in EST),
# given a vendor-specific table GUID. GUID is a list like -
# [32-bit, 16-bit, 16-bit, [8 bytes]]
#
def search_config (self, cfg_table, count, guid):
index = 0
while index != count:
cfg_entry = cfg_table[index]['VendorGuid']
if cfg_entry['Data1'] == guid[0] and \
cfg_entry['Data2'] == guid[1] and \
cfg_entry['Data3'] == guid[2] and \
self.value_data (cfg_entry['Data4']).tolist () == guid[3]:
return cfg_table[index]['VendorTable']
index = index + 1
return gdb.Value(self.EINVAL)
#
# Returns offset of a field within structure. Useful
# for getting container of a structure.
#
def offsetof (self, typename, field):
t = gdb.Value (0).cast (self.ptype (typename))
return long(t[field].address)
#
# Returns sizeof of a type.
#
def sizeof (self, typename):
return self.type (typename).sizeof
#
# Returns the EFI_IMAGE_NT_HEADERS32 pointer, given
# an ImageBase address as a gdb.Value.
#
def pe_headers (self, imagebase):
dosh_t = self.ptype ('EFI_IMAGE_DOS_HEADER')
head_t = self.ptype ('EFI_IMAGE_OPTIONAL_HEADER_UNION')
dosh = imagebase.cast (dosh_t)
h_addr = long(imagebase)
if dosh['e_magic'] == self.DOS_MAGIC:
h_addr = h_addr + long(dosh['e_lfanew'])
return gdb.Value(h_addr).cast (head_t)
#
# Returns a dictionary with PE sections.
#
def pe_sections (self, opt, file, imagebase):
sect_t = self.ptype ('EFI_IMAGE_SECTION_HEADER')
sections = (opt.address + 1).cast (sect_t)
sects = {}
for i in range (file['NumberOfSections']):
name = UefiMisc.parse_utf8 (sections[i]['Name'])
addr = long(sections[i]['VirtualAddress'])
if name != '':
sects[name] = addr
return sects
#
# Returns True if pe_headers refer to a PE32+ image.
#
def pe_is_64 (self, pe_headers):
if pe_headers['Pe32']['OptionalHeader']['Magic'] == self.PE32PLUS_MAGIC:
return True
return False
#
# Returns the PE fileheader.
#
def pe_file (self, pe):
if self.pe_is_64 (pe):
return pe['Pe32Plus']['FileHeader']
else:
return pe['Pe32']['FileHeader']
#
# Returns the PE (not so) optional header.
#
def pe_optional (self, pe):
if self.pe_is_64 (pe):
return pe['Pe32Plus']['OptionalHeader']
else:
return pe['Pe32']['OptionalHeader']
#
# Returns the symbol file name for a PE image.
#
def pe_parse_debug (self, pe):
opt = self.pe_optional (pe)
debug_dir_entry = opt['DataDirectory'][6]
dep = debug_dir_entry['VirtualAddress'] + opt['ImageBase']
dep = dep.cast (self.ptype ('EFI_IMAGE_DEBUG_DIRECTORY_ENTRY'))
cvp = dep.dereference ()['RVA'] + opt['ImageBase']
cvv = cvp.cast(self.ptype ('UINT32')).dereference ()
if cvv == self.CV_NB10:
return cvp + self.sizeof('EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY')
elif cvv == self.CV_RSDS:
return cvp + self.sizeof('EFI_IMAGE_DEBUG_CODEVIEW_RSDS_ENTRY')
elif cvv == self.CV_MTOC:
return cvp + self.sizeof('EFI_IMAGE_DEBUG_CODEVIEW_MTOC_ENTRY')
return gdb.Value(self.EINVAL)
#
# Prepares gdb symbol load command with proper section information.
# Currently supports Mach-O and single-section files.
#
# TODO: Proper ELF support.
#
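    # Editorial example of a command this helper can emit (file name and
    # addresses are made up):
    #   add-symbol-file DxeCore.debug 0x5f2a1000 -s .data 0x5f2b3000
    # i.e. the symbol file, the rebased .text address and, in the fallback
    # path, the rebased .data section.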
def get_sym_cmd (self, file, orgbase, sections, macho, fallack_base):
cmd = 'add-symbol-file %s' % file
# Fallback case, no sections, just load .text.
if not sections.get('.text') or not sections.get('.data'):
cmd += ' 0x%x' % (fallack_base)
return cmd
cmd += ' 0x%x' % (long(orgbase) + sections['.text'])
if not macho or not os.path.exists(file):
# Another fallback, try to load data at least.
cmd += ' -s .data 0x%x' % (long(orgbase) + sections['.data'])
return cmd
# 1. Parse Mach-O.
# FIXME: We should not rely on otool really.
commands = subprocess.check_output(['otool', '-l', file])
try:
lines = commands.decode('utf-8').split('\n')
except:
lines = commands.split('\n')
in_sect = False
machsections = {}
for line in lines:
line = line.strip()
if line.startswith('Section'):
in_sect = True
sectname = None
segname = None
elif in_sect:
if line.startswith('sectname'):
sectname = line.split()[1]
elif line.startswith('segname'):
segname = line.split()[1]
elif line.startswith('addr'):
machsections[segname + '.' + sectname] = long(line.split()[1], base=16)
in_sect = False
# 2. Convert section names to gdb sections.
mapping = {
'__TEXT.__cstring': '.cstring',
'__TEXT.__const': '.const',
'__TEXT.__ustring': '__TEXT.__ustring',
'__DATA.__const': '.const_data',
'__DATA.__data': '.data',
'__DATA.__bss': '.bss',
'__DATA.__common': '__DATA.__common',
# FIXME: These should not be loadable, but gdb still loads them :/
# '__DWARF.__apple_names': '__DWARF.__apple_names',
# '__DWARF.__apple_namespac': '__DWARF.__apple_namespac',
# '__DWARF.__apple_types': '__DWARF.__apple_types',
# '__DWARF.__apple_objc': '__DWARF.__apple_objc',
}
# 3. Rebase.
for entry in mapping:
if machsections.get(entry):
cmd += ' -s %s 0x%x' % (mapping[entry], long(orgbase) + machsections[entry])
return cmd
#
# Parses an EFI_LOADED_IMAGE_PROTOCOL, figuring out the symbol file name.
# This file name is then appended to list of loaded symbols.
#
# TODO: Support TE images.
#
def parse_image (self, image, syms):
orgbase = base = image['ImageBase']
pe = self.pe_headers (base)
opt = self.pe_optional (pe)
file = self.pe_file (pe)
sym_name = self.pe_parse_debug (pe)
sections = self.pe_sections (opt, file, base)
# For ELF and Mach-O-derived images...
if self.offset_by_headers:
base = base + opt['SizeOfHeaders']
if sym_name != self.EINVAL:
sym_name = sym_name.cast (self.ptype('CHAR8')).string ()
sym_name_dbg = re.sub(r"\.dll$", ".debug", sym_name)
macho = False
if os.path.isdir(sym_name + '.dSYM'):
sym_name += '.dSYM/Contents/Resources/DWARF/' + os.path.basename(sym_name)
macho = True
elif sym_name_dbg != sym_name and os.path.exists(sym_name_dbg):
# TODO: implement .elf handling.
sym_name = sym_name_dbg
syms.append (self.get_sym_cmd (sym_name, long(orgbase), sections, macho, long(base)))
#
# Parses table EFI_DEBUG_IMAGE_INFO structures, builds
# a list of add-symbol-file commands, and reloads debugger
# symbols.
#
def parse_edii (self, edii, count):
index = 0
syms = []
print ("Found {} images...".format(count))
while index != count:
entry = edii[index]
if entry['ImageInfoType'].dereference () == 1:
entry = entry['NormalImage']
self.parse_image(entry['LoadedImageProtocolInstance'], syms)
else:
print ("Skipping unknown EFI_DEBUG_IMAGE_INFO (Type 0x%x)" % \
entry['ImageInfoType'].dereference ())
index = index + 1
gdb.execute ("symbol-file")
print ("Loading new symbols...")
for sym in syms:
try:
gdb.execute (sym)
except (gdb.error) as err:
print ('Failed: %s' % err)
#
# Parses EFI_DEBUG_IMAGE_INFO_TABLE_HEADER, in order to load
# image symbols.
#
def parse_dh (self, dh):
dh_t = self.ptype ('EFI_DEBUG_IMAGE_INFO_TABLE_HEADER')
dh = dh.cast (dh_t)
print ("DebugImageInfoTable @ 0x%x, 0x%x entries" % \
(long (dh['EfiDebugImageInfoTable']), dh['TableSize']))
if dh['UpdateStatus'] & self.DEBUG_IS_UPDATING:
print ("EfiDebugImageInfoTable update in progress, retry later")
return
self.parse_edii (dh['EfiDebugImageInfoTable'], dh['TableSize'])
#
# Parses EFI_SYSTEM_TABLE, in order to load image symbols.
#
def parse_est (self, est):
est_t = self.ptype ('EFI_SYSTEM_TABLE')
est = est.cast (est_t)
print ("Connected to %s (Rev. 0x%x)" % \
(UefiMisc.parse_utf16 (est['FirmwareVendor']), \
long (est['FirmwareRevision'])))
print ("ConfigurationTable @ 0x%x, 0x%x entries" % \
(long (est['ConfigurationTable']), est['NumberOfTableEntries']))
dh = self.search_config(est['ConfigurationTable'],
est['NumberOfTableEntries'], self.DEBUG_GUID)
if dh == self.EINVAL:
print ("No EFI_DEBUG_IMAGE_INFO_TABLE_HEADER")
return
self.parse_dh (dh)
#
# Usage information.
#
def usage (self):
print ("Usage: reload-uefi [-o] [/path/to/GdbSyms.dll]")
#
# Handler for reload-uefi.
#
def invoke (self, arg, from_tty):
args = arg.split(' ')
try:
opts, args = getopt.getopt(args, "o", ["offset-by-headers"])
except (getopt.GetoptError) as err:
self.usage ()
return
for opt, arg in opts:
if opt == "-o":
self.offset_by_headers = True
if len(args) >= 1 and args[0] != '':
gdb.execute ("symbol-file")
gdb.execute ("symbol-file %s" % args[0])
else:
# FIXME: gdb.objfiles () loses files after symbol-file execution,
            # so we have to extract GdbSyms.dll manually.
lines = gdb.execute ("info files", to_string=True).split('\n')
for line in lines:
m = re.search("`([^']+)'", line)
if m:
gdb.execute ("symbol-file")
gdb.execute ("symbol-file %s" % m.group(1))
break
est = self.search_est ()
if est == self.EINVAL:
print ("No EFI_SYSTEM_TABLE...")
return
print ("EFI_SYSTEM_TABLE @ 0x%x" % est)
self.parse_est (est)
class UefiStringPrinter:
def __init__(self, val):
self.val = val
def to_string (self):
if not self.val:
return "NULL"
return 'L"' + UefiMisc.parse_utf16(self.val) + '"'
class UefiEfiStatusPrinter:
def __init__(self, val):
self.val = val
def to_string (self):
return UefiMisc.parse_status(self.val, True)
class UefiReturnStatusPrinter:
def __init__(self, val):
self.val = val
def to_string (self):
return UefiMisc.parse_status(self.val, False)
class UefiGuidPrinter:
def __init__(self, val):
self.val = val
def to_string (self):
return UefiMisc.parse_guid(self.val)
def lookup_uefi_type (val):
if str(val.type) == 'const CHAR16 *' or str(val.type) == 'CHAR16 *':
return UefiStringPrinter(val)
elif str(val.type) == 'EFI_STATUS':
return UefiEfiStatusPrinter(val)
elif str(val.type) == 'RETURN_STATUS':
return UefiReturnStatusPrinter(val)
elif str(val.type) == 'GUID' or str(val.type) == 'EFI_GUID':
return UefiGuidPrinter(val)
return None
ReloadUefi ()
gdb.pretty_printers.append (lookup_uefi_type)
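# Editorial usage sketch (assumed, not taken verbatim from this script): after
# sourcing this file in gdb and attaching to the target, running
#   (gdb) reload-uefi -o /path/to/GdbSyms.dll
# locates the EFI_SYSTEM_TABLE, walks the debug image info table and reloads
# symbols for every loaded image, while the registered pretty-printers render
# CHAR16 strings, EFI_STATUS, RETURN_STATUS and GUID values readably.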
|
12
|
0
|
0.0
|
81
|
0
|
12
|
1.0
|
0
|
80
|
[
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 35,
"message": "Import only needed names or import the module and then use its members.",
"textRange": {
"endLine": 35,
"endOffset": 25,
"startLine": 35,
"startOffset": 0
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 119,
"message": "Rename this variable; it shadows a builtin.",
"textRange": {
"endLine": 119,
"endOffset": 17,
"startLine": 119,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 266,
"message": "Complete the task associated to this \"TODO\" comment.",
"textRange": {
"endLine": 266,
"endOffset": 31,
"startLine": 266,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 268,
"message": "Refactor this function to reduce its Cognitive Complexity from 17 to the 15 allowed.",
"textRange": {
"endLine": 268,
"endOffset": 19,
"startLine": 268,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 272,
"message": "Define a constant instead of duplicating this literal '.data' 3 times.",
"textRange": {
"endLine": 272,
"endOffset": 64,
"startLine": 272,
"startOffset": 57
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 284,
"message": "Take the required action to fix the issue indicated by this \"FIXME\" comment.",
"textRange": {
"endLine": 284,
"endOffset": 52,
"startLine": 284,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 288,
"message": "Specify an exception class to catch or reraise the exception",
"textRange": {
"endLine": 288,
"endOffset": 14,
"startLine": 288,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 316,
"message": "Take the required action to fix the issue indicated by this \"FIXME\" comment.",
"textRange": {
"endLine": 316,
"endOffset": 78,
"startLine": 316,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 334,
"message": "Complete the task associated to this \"TODO\" comment.",
"textRange": {
"endLine": 334,
"endOffset": 30,
"startLine": 334,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 356,
"message": "Complete the task associated to this \"TODO\" comment.",
"textRange": {
"endLine": 356,
"endOffset": 48,
"startLine": 356,
"startOffset": 16
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 437,
"message": "Remove the unused local variable \"err\".",
"textRange": {
"endLine": 437,
"endOffset": 42,
"startLine": 437,
"startOffset": 39
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 448,
"message": "Take the required action to fix the issue indicated by this \"FIXME\" comment.",
"textRange": {
"endLine": 448,
"endOffset": 77,
"startLine": 448,
"startOffset": 12
},
"type": "CODE_SMELL"
}
] | 7,187 |
code/2.7/7_decorators_args.py
|
suqi/suqi-interpy-zh
| 6,594 |
10218124
|
from functools import wraps
def logit(logfile='out.log'):
def logging_decorator(func):
@wraps(func)
def wrapped_function(*args, **kwargs):
log_string = func.__name__ + ' was called'
print log_string
with open(logfile, 'a') as opened_file:
opened_file.write(log_string + '\n')
return func(*args, **kwargs)
return wrapped_function
return logging_decorator
@logit()
def myfunc1():
pass
@logit(logfile='func2.log')
def myfunc2():
pass
def main():
myfunc1()
myfunc2()
if __name__ == '__main__':
main()
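# Editorial note on the expected behaviour (derived from the code above, which
# targets Python 2.7 per its directory name): calling main() prints
# "myfunc1 was called" and "myfunc2 was called", appending the first line to
# out.log and the second to func2.log, because logit() parameterises the
# decorator with a logfile name before wrapping the function.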
| null | null | null | null | null | null | null | null | null |
[] | 12,178 |
tests/integration/test_disabled_access_control_improvements/test_row_policy.py
|
mrk-andreev/ClickHouse
| 8,629 |
9674883
|
import os
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
"node",
main_configs=["configs/config.d/disable_access_control_improvements.xml"],
user_configs=[
"configs/users.d/row_policy.xml",
"configs/users.d/another_user.xml",
],
)
def copy_policy_xml(local_file_name):
script_dir = os.path.dirname(os.path.realpath(__file__))
node.copy_file_to_container(
os.path.join(script_dir, local_file_name),
"/etc/clickhouse-server/users.d/row_policy.xml",
)
node.query("SYSTEM RELOAD CONFIG")
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
try:
cluster.start()
node.query(
"""
CREATE DATABASE mydb;
CREATE TABLE mydb.filtered_table1 (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a;
INSERT INTO mydb.filtered_table1 values (0, 0), (0, 1), (1, 0), (1, 1);
CREATE TABLE mydb.table (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a;
INSERT INTO mydb.table values (0, 0), (0, 1), (1, 0), (1, 1);
CREATE TABLE mydb.filtered_table2 (a UInt8, b UInt8, c UInt8, d UInt8) ENGINE MergeTree ORDER BY a;
INSERT INTO mydb.filtered_table2 values (0, 0, 0, 0), (1, 2, 3, 4), (4, 3, 2, 1), (0, 0, 6, 0);
CREATE TABLE mydb.filtered_table3 (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a;
INSERT INTO mydb.filtered_table3 values (0, 0), (0, 1), (1, 0), (1, 1);
CREATE TABLE mydb.`.filtered_table4` (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a;
INSERT INTO mydb.`.filtered_table4` values (0, 0), (0, 1), (1, 0), (1, 1);
CREATE TABLE mydb.local (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a;
"""
)
node.query("INSERT INTO mydb.local values (2, 0), (2, 1), (1, 0), (1, 1)")
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def reset_policies():
try:
yield
finally:
copy_policy_xml("normal_filters.xml")
node.query("DROP POLICY IF EXISTS pA, pB ON mydb.filtered_table1")
def test_introspection():
policies = [
[
"another ON mydb.filtered_table1",
"another",
"mydb",
"filtered_table1",
"6068883a-0e9d-f802-7e22-0144f8e66d3c",
"users.xml",
"1",
0,
0,
"['another']",
"[]",
],
[
"another ON mydb.filtered_table2",
"another",
"mydb",
"filtered_table2",
"c019e957-c60b-d54e-cc52-7c90dac5fb01",
"users.xml",
"1",
0,
0,
"['another']",
"[]",
],
[
"another ON mydb.filtered_table3",
"another",
"mydb",
"filtered_table3",
"4cb080d0-44e8-dbef-6026-346655143628",
"users.xml",
"1",
0,
0,
"['another']",
"[]",
],
[
"another ON mydb.local",
"another",
"mydb",
"local",
"5b23c389-7e18-06bf-a6bc-dd1afbbc0a97",
"users.xml",
"a = 1",
0,
0,
"['another']",
"[]",
],
[
"default ON mydb.filtered_table1",
"default",
"mydb",
"filtered_table1",
"9e8a8f62-4965-2b5e-8599-57c7b99b3549",
"users.xml",
"a = 1",
0,
0,
"['default']",
"[]",
],
[
"default ON mydb.filtered_table2",
"default",
"mydb",
"filtered_table2",
"cffae79d-b9bf-a2ef-b798-019c18470b25",
"users.xml",
"a + b < 1 or c - d > 5",
0,
0,
"['default']",
"[]",
],
[
"default ON mydb.filtered_table3",
"default",
"mydb",
"filtered_table3",
"12fc5cef-e3da-3940-ec79-d8be3911f42b",
"users.xml",
"c = 1",
0,
0,
"['default']",
"[]",
],
[
"default ON mydb.local",
"default",
"mydb",
"local",
"cdacaeb5-1d97-f99d-2bb0-4574f290629c",
"users.xml",
"1",
0,
0,
"['default']",
"[]",
],
]
assert node.query(
"SELECT * from system.row_policies ORDER BY short_name, database, table"
) == TSV(policies)
def test_dcl_introspection():
assert node.query("SHOW POLICIES") == TSV(
[
"another ON mydb.filtered_table1",
"another ON mydb.filtered_table2",
"another ON mydb.filtered_table3",
"another ON mydb.local",
"default ON mydb.filtered_table1",
"default ON mydb.filtered_table2",
"default ON mydb.filtered_table3",
"default ON mydb.local",
]
)
assert node.query("SHOW POLICIES ON mydb.filtered_table1") == TSV(
["another", "default"]
)
assert node.query("SHOW POLICIES ON mydb.local") == TSV(["another", "default"])
assert node.query("SHOW POLICIES ON mydb.*") == TSV(
[
"another ON mydb.filtered_table1",
"another ON mydb.filtered_table2",
"another ON mydb.filtered_table3",
"another ON mydb.local",
"default ON mydb.filtered_table1",
"default ON mydb.filtered_table2",
"default ON mydb.filtered_table3",
"default ON mydb.local",
]
)
assert node.query("SHOW POLICIES default") == TSV(
[
"default ON mydb.filtered_table1",
"default ON mydb.filtered_table2",
"default ON mydb.filtered_table3",
"default ON mydb.local",
]
)
assert (
node.query("SHOW CREATE POLICY default ON mydb.filtered_table1")
== "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default\n"
)
assert (
node.query("SHOW CREATE POLICY default ON mydb.filtered_table2")
== "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default\n"
)
assert (
node.query("SHOW CREATE POLICY default ON mydb.filtered_table3")
== "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default\n"
)
assert (
node.query("SHOW CREATE POLICY default ON mydb.local")
== "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default\n"
)
assert node.query("SHOW CREATE POLICY default") == TSV(
[
"CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default",
"CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default",
"CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default",
"CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default",
]
)
assert node.query("SHOW CREATE POLICIES ON mydb.filtered_table1") == TSV(
[
"CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another",
"CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default",
]
)
assert node.query("SHOW CREATE POLICIES ON mydb.*") == TSV(
[
"CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another",
"CREATE ROW POLICY another ON mydb.filtered_table2 FOR SELECT USING 1 TO another",
"CREATE ROW POLICY another ON mydb.filtered_table3 FOR SELECT USING 1 TO another",
"CREATE ROW POLICY another ON mydb.local FOR SELECT USING a = 1 TO another",
"CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default",
"CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default",
"CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default",
"CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default",
]
)
assert node.query("SHOW CREATE POLICIES") == TSV(
[
"CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another",
"CREATE ROW POLICY another ON mydb.filtered_table2 FOR SELECT USING 1 TO another",
"CREATE ROW POLICY another ON mydb.filtered_table3 FOR SELECT USING 1 TO another",
"CREATE ROW POLICY another ON mydb.local FOR SELECT USING a = 1 TO another",
"CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default",
"CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default",
"CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default",
"CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default",
]
)
expected_access = (
"CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another\n"
"CREATE ROW POLICY another ON mydb.filtered_table2 FOR SELECT USING 1 TO another\n"
"CREATE ROW POLICY another ON mydb.filtered_table3 FOR SELECT USING 1 TO another\n"
"CREATE ROW POLICY another ON mydb.local FOR SELECT USING a = 1 TO another\n"
"CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default\n"
"CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default\n"
"CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default\n"
"CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default\n"
)
assert expected_access in node.query("SHOW ACCESS")
copy_policy_xml("all_rows.xml")
assert node.query("SHOW POLICIES") == TSV(
[
"another ON mydb.filtered_table1",
"another ON mydb.filtered_table2",
"another ON mydb.filtered_table3",
"default ON mydb.filtered_table1",
"default ON mydb.filtered_table2",
"default ON mydb.filtered_table3",
]
)
assert (
node.query("SHOW CREATE POLICY default ON mydb.filtered_table1")
== "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING 1 TO default\n"
)
assert (
node.query("SHOW CREATE POLICY default ON mydb.filtered_table2")
== "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING 1 TO default\n"
)
assert (
node.query("SHOW CREATE POLICY default ON mydb.filtered_table3")
== "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING 1 TO default\n"
)
copy_policy_xml("no_rows.xml")
assert node.query("SHOW POLICIES") == TSV(
[
"another ON mydb.filtered_table1",
"another ON mydb.filtered_table2",
"another ON mydb.filtered_table3",
"default ON mydb.filtered_table1",
"default ON mydb.filtered_table2",
"default ON mydb.filtered_table3",
]
)
assert (
node.query("SHOW CREATE POLICY default ON mydb.filtered_table1")
== "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING NULL TO default\n"
)
assert (
node.query("SHOW CREATE POLICY default ON mydb.filtered_table2")
== "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING NULL TO default\n"
)
assert (
node.query("SHOW CREATE POLICY default ON mydb.filtered_table3")
== "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING NULL TO default\n"
)
copy_policy_xml("no_filters.xml")
assert node.query("SHOW POLICIES") == ""
def test_dcl_management():
copy_policy_xml("no_filters.xml")
assert node.query("SHOW POLICIES") == ""
node.query("CREATE POLICY pA ON mydb.filtered_table1 FOR SELECT USING a<b")
assert node.query("SELECT * FROM mydb.filtered_table1") == ""
assert node.query("SHOW POLICIES ON mydb.filtered_table1") == "pA\n"
node.query("ALTER POLICY pA ON mydb.filtered_table1 TO default")
assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[0, 1]])
assert node.query("SHOW POLICIES ON mydb.filtered_table1") == "pA\n"
node.query("ALTER POLICY pA ON mydb.filtered_table1 FOR SELECT USING a>b")
assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[1, 0]])
node.query("ALTER POLICY pA ON mydb.filtered_table1 RENAME TO pB")
assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[1, 0]])
assert node.query("SHOW POLICIES ON mydb.filtered_table1") == "pB\n"
assert (
node.query("SHOW CREATE POLICY pB ON mydb.filtered_table1")
== "CREATE ROW POLICY pB ON mydb.filtered_table1 FOR SELECT USING a > b TO default\n"
)
node.query("DROP POLICY pB ON mydb.filtered_table1")
assert node.query("SELECT * FROM mydb.filtered_table1") == TSV(
[[0, 0], [0, 1], [1, 0], [1, 1]]
)
assert node.query("SHOW POLICIES") == ""
def test_dcl_users_with_policies_from_users_xml():
node.query("CREATE USER X")
node.query("GRANT SELECT ON mydb.filtered_table1 TO X")
assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[1, 0], [1, 1]])
assert node.query("SELECT * FROM mydb.filtered_table1", user="X") == ""
node.query("DROP USER X")
def test_some_users_without_policies():
copy_policy_xml("no_filters.xml")
assert node.query("SHOW POLICIES") == ""
node.query("CREATE USER X, Y")
node.query("GRANT SELECT ON mydb.filtered_table1 TO X, Y")
# permissive a >= b for X, none for Y
node.query(
"CREATE POLICY pA ON mydb.filtered_table1 FOR SELECT USING a >= b AS permissive TO X"
)
assert node.query("SELECT * FROM mydb.filtered_table1", user="X") == TSV(
[[0, 0], [1, 0], [1, 1]]
)
assert node.query("SELECT * FROM mydb.filtered_table1", user="Y") == ""
    # restrictive a >= b for X, none for Y
node.query("ALTER POLICY pA ON mydb.filtered_table1 AS restrictive")
assert node.query("SELECT * FROM mydb.filtered_table1", user="X") == ""
assert node.query("SELECT * FROM mydb.filtered_table1", user="Y") == ""
# permissive a >= b for X, restrictive a <= b for X, none for Y
node.query("ALTER POLICY pA ON mydb.filtered_table1 AS permissive")
node.query(
"CREATE POLICY pB ON mydb.filtered_table1 FOR SELECT USING a <= b AS restrictive TO X"
)
assert node.query("SELECT * FROM mydb.filtered_table1", user="X") == TSV(
[[0, 0], [1, 1]]
)
assert node.query("SELECT * FROM mydb.filtered_table1", user="Y") == ""
# permissive a >= b for X, restrictive a <= b for Y
node.query("ALTER POLICY pB ON mydb.filtered_table1 TO Y")
assert node.query("SELECT * FROM mydb.filtered_table1", user="X") == TSV(
[[0, 0], [1, 0], [1, 1]]
)
assert node.query("SELECT * FROM mydb.filtered_table1", user="Y") == ""
node.query("DROP POLICY pA, pB ON mydb.filtered_table1")
node.query("DROP USER X, Y")
|
23
|
0
|
0.0
|
0
|
0
|
23
|
1.0
|
0
|
8
|
[
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 74,
"message": "Define a constant instead of duplicating this literal \"another ON mydb.filtered_table1\" 5 times.",
"textRange": {
"endLine": 74,
"endOffset": 45,
"startLine": 74,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 79,
"message": "Define a constant instead of duplicating this literal \"users.xml\" 8 times.",
"textRange": {
"endLine": 79,
"endOffset": 23,
"startLine": 79,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 83,
"message": "Define a constant instead of duplicating this literal \"['another']\" 4 times.",
"textRange": {
"endLine": 83,
"endOffset": 25,
"startLine": 83,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 87,
"message": "Define a constant instead of duplicating this literal \"another ON mydb.filtered_table2\" 5 times.",
"textRange": {
"endLine": 87,
"endOffset": 45,
"startLine": 87,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 100,
"message": "Define a constant instead of duplicating this literal \"another ON mydb.filtered_table3\" 5 times.",
"textRange": {
"endLine": 100,
"endOffset": 45,
"startLine": 100,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 113,
"message": "Define a constant instead of duplicating this literal \"another ON mydb.local\" 3 times.",
"textRange": {
"endLine": 113,
"endOffset": 35,
"startLine": 113,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 126,
"message": "Define a constant instead of duplicating this literal \"default ON mydb.filtered_table1\" 6 times.",
"textRange": {
"endLine": 126,
"endOffset": 45,
"startLine": 126,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 135,
"message": "Define a constant instead of duplicating this literal \"['default']\" 4 times.",
"textRange": {
"endLine": 135,
"endOffset": 25,
"startLine": 135,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 139,
"message": "Define a constant instead of duplicating this literal \"default ON mydb.filtered_table2\" 6 times.",
"textRange": {
"endLine": 139,
"endOffset": 45,
"startLine": 139,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 152,
"message": "Define a constant instead of duplicating this literal \"default ON mydb.filtered_table3\" 6 times.",
"textRange": {
"endLine": 152,
"endOffset": 45,
"startLine": 152,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 165,
"message": "Define a constant instead of duplicating this literal \"default ON mydb.local\" 4 times.",
"textRange": {
"endLine": 165,
"endOffset": 35,
"startLine": 165,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 184,
"message": "Define a constant instead of duplicating this literal \"SHOW POLICIES\" 7 times.",
"textRange": {
"endLine": 184,
"endOffset": 37,
"startLine": 184,
"startOffset": 22
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 197,
"message": "Define a constant instead of duplicating this literal \"SHOW POLICIES ON mydb.filtered_table1\" 4 times.",
"textRange": {
"endLine": 197,
"endOffset": 61,
"startLine": 197,
"startOffset": 22
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 223,
"message": "Define a constant instead of duplicating this literal \"SHOW CREATE POLICY default ON mydb.filtered_table1\" 3 times.",
"textRange": {
"endLine": 223,
"endOffset": 71,
"startLine": 223,
"startOffset": 19
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 227,
"message": "Define a constant instead of duplicating this literal \"SHOW CREATE POLICY default ON mydb.filtered_table2\" 3 times.",
"textRange": {
"endLine": 227,
"endOffset": 71,
"startLine": 227,
"startOffset": 19
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 231,
"message": "Define a constant instead of duplicating this literal \"SHOW CREATE POLICY default ON mydb.filtered_table3\" 3 times.",
"textRange": {
"endLine": 231,
"endOffset": 71,
"startLine": 231,
"startOffset": 19
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 241,
"message": "Define a constant instead of duplicating this literal \"CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default\" 4 times.",
"textRange": {
"endLine": 241,
"endOffset": 97,
"startLine": 241,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 242,
"message": "Define a constant instead of duplicating this literal \"CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default\" 3 times.",
"textRange": {
"endLine": 242,
"endOffset": 122,
"startLine": 242,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 243,
"message": "Define a constant instead of duplicating this literal \"CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default\" 3 times.",
"textRange": {
"endLine": 243,
"endOffset": 97,
"startLine": 243,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 244,
"message": "Define a constant instead of duplicating this literal \"CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default\" 3 times.",
"textRange": {
"endLine": 244,
"endOffset": 83,
"startLine": 244,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 249,
"message": "Define a constant instead of duplicating this literal \"CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another\" 3 times.",
"textRange": {
"endLine": 249,
"endOffset": 93,
"startLine": 249,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 338,
"message": "Define a constant instead of duplicating this literal \"no_filters.xml\" 3 times.",
"textRange": {
"endLine": 338,
"endOffset": 36,
"startLine": 338,
"startOffset": 20
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 347,
"message": "Define a constant instead of duplicating this literal \"SELECT * FROM mydb.filtered_table1\" 15 times.",
"textRange": {
"endLine": 347,
"endOffset": 58,
"startLine": 347,
"startOffset": 22
},
"type": "CODE_SMELL"
}
] | 5,922 |
recipes/Python/576505_Sudoku_solvercreator/recipe-576505.py
|
tdiprima/code
| 2,023 |
515289
|
#!/usr/bin/python
# TODO: Make solve function faster!!! Have it check for singles, doubles,
# triples, and quads both naked and hidden
from random import random
def rand(lst):
"returns a random element in list or integer"
if type(lst)==type([]):
return lst[int(random()*len(lst))]
elif type(lst)==type(0):
return int(random()*lst)
else:
        raise Exception,"don't know what to do with type %s!!!"%type(lst)
def reorder(lst):
"reorders a list to a random order"
ret=[]
for item in lst:
ret.insert(rand(len(ret)),item)
return ret
def row(row,puzzle):
return puzzle[row*9:row*9+9]
def col(col,puzzle):
ret=[]
for i in range(9):
ret.append(row(i,puzzle)[col])
return ret
def box(box,puzzle):
x=box%3
if box<3:
y=0
elif box<6:
y=1
else:
y=2
ret=[]
for i in range(3):
ret.extend(row(y*3+i,puzzle)[x*3:x*3+3])
return ret
def remaining(wcb):
ret=[]
for i in range(1,10):
if not i in wcb:
ret.append(i)
return reorder(ret) # does not significantly slow program
# and allows for generation of random puzzles
def coordToBox(x,y):
box=0
if x<3:
pass
elif x<6:
box+=1
else:
box+=2
if y<3:
pass
elif y<6:
box+=3
else:
box+=6
return box
def coordToLinear(x,y):
return y*9+x
def linearToCoord(index):
y=8
for i in range(9):
if index<i*9:
y-=1
x=index%9
return x,y
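# For example, coordToLinear(1, 1) == 10 and linearToCoord(10) == (1, 1); as
# written, the two helpers invert each other over all 81 cells of the grid.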
def possible(x,y,puzzle):
if not puzzle[coordToLinear(x,y)]==0:
return [puzzle[coordToLinear(x,y)]]
imp=[]
imp.extend(row(y,puzzle))
imp.extend(col(x,puzzle))
imp.extend(box(coordToBox(x,y),puzzle))
return remaining(imp)
def printPuzzle(puzzle):
string=((((("%s "*3)+"| ")*2+("%s "*3)+"\n")*3+"------|-------|------\n")*3)[:-22]+"\n"
text=(string%tuple(puzzle)).replace("0","_")
print text,
return text
def check(x,y,puzzle):
for i in range(9):
if not i==y and len(possible(x,i,puzzle))==0:
return False
if not i==x and len(possible(i,y,puzzle))==0:
return False
box_x,box_y=linearToCoord(coordToBox(x,y))
for i in range(box_x,box_x+3):
if i==x:
break
for j in range(box_y,box_y+3):
if j==y:
break
if len(possible(i,j,puzzle))==0:
return False
return True
def solve(puzzle,start=0): # TODO: Make this function faster!!!
if start==81:
return [puzzle[:]]
ret=[]
x,y=linearToCoord(start)
possibilities=possible(x,y,puzzle)
if len(possibilities)==0:
return
for possibility in possibilities:
p=puzzle[:]
p[coordToLinear(x,y)]=possibility
x,y=linearToCoord(start)
if not check(x,y,puzzle):
continue
solved=solve(p,start+1)
if solved:
ret.extend(solved)
if 1<len(ret): # there is more than one puzzle
return ret # enough already!!!
return ret
def solve_no_check_for_dups(puzzle,start=0):
"This solver function does not check for multiple solutions."
if start==81:
return puzzle[:]
x,y=linearToCoord(start)
possibilities=possible(x,y,puzzle)
if len(possibilities)==0:
return
for possibility in possibilities:
p=puzzle[:]
p[coordToLinear(x,y)]=possibility
x,y=linearToCoord(start)
if not check(x,y,puzzle):
continue
solved=solve_no_check_for_dups(p,start+1)
if solved:
return solved
return []
def generate(sym=True,goodness=0): # goodness=0 means evil
if sym:
RANGE=41
else:
RANGE=81
puzzle=[0]*81
soln=solve_no_check_for_dups(puzzle)
puzzle=soln[:]
spaces=range(RANGE)
for i in range(RANGE-goodness):
space=spaces.pop(rand(len(spaces)))
puzzle[space]=0
if sym:
puzzle[80-space]=0
if 1<len(solve(puzzle)):
puzzle[space]=soln[space]
if sym:
puzzle[80-space]=soln[80-space]
return puzzle
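# Editorial reading of generate() (comment added, behaviour unchanged): it
# solves an empty grid, then tries to blank RANGE - goodness randomly chosen
# cells (plus their mirrors when sym=True), restoring any cell whose removal
# would permit more than one solution, so goodness=0 yields the sparsest
# puzzle this solver can still verify as unique.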
#puzzle=[]
#for i in range(9):
# puzzle.extend(map(int,raw_input().split()))
try:
import psyco
psyco.full()
except ImportError:
print "You do not have psyco installed. The program will run slower."
if __name__=="__main__":
#puzzle=generate()
#printPuzzle(puzzle)
#soln=solve(puzzle)
#printPuzzle(soln[0])
#if 1<len(soln):
# print "More than one solution!!!"
#puzzle=generate(sym=False)
#printPuzzle(puzzle)
#soln=solve(puzzle)
#printPuzzle(soln[0])
#if 1<len(soln):
# print "More than one solution!!!"
from time import sleep
while True:
puzzle=generate(sym=False)
text=printPuzzle(puzzle)
f=open("./sudoku","a")
f.write(text)
f.close()
sleep(180)
|
19
|
0
|
0.0
|
71
|
0
|
19
|
1.0
|
2
|
55
|
[
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 3,
"message": "Complete the task associated to this \"TODO\" comment.",
"textRange": {
"endLine": 3,
"endOffset": 73,
"startLine": 3,
"startOffset": 0
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 15,
"message": "Replace this generic exception class with a more specific one.",
"textRange": {
"endLine": 15,
"endOffset": 17,
"startLine": 15,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 49,
"message": "Use the opposite operator (\"not in\") instead.",
"textRange": {
"endLine": 49,
"endOffset": 17,
"startLine": 49,
"startOffset": 5
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 54,
"message": "Rename function \"coordToBox\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 54,
"endOffset": 14,
"startLine": 54,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 57,
"message": "Either remove or fill this block of code.",
"textRange": {
"endLine": 57,
"endOffset": 6,
"startLine": 57,
"startOffset": 2
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 63,
"message": "Either remove or fill this block of code.",
"textRange": {
"endLine": 63,
"endOffset": 6,
"startLine": 63,
"startOffset": 2
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 70,
"message": "Rename function \"coordToLinear\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 70,
"endOffset": 17,
"startLine": 70,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 73,
"message": "Rename function \"linearToCoord\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 73,
"endOffset": 17,
"startLine": 73,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 82,
"message": "Use the opposite operator (\"!=\") instead.",
"textRange": {
"endLine": 82,
"endOffset": 37,
"startLine": 82,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 90,
"message": "Rename function \"printPuzzle\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 90,
"endOffset": 15,
"startLine": 90,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 93,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 93,
"endOffset": 6,
"startLine": 93,
"startOffset": 1
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 96,
"message": "Refactor this function to reduce its Cognitive Complexity from 18 to the 15 allowed.",
"textRange": {
"endLine": 96,
"endOffset": 9,
"startLine": 96,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 98,
"message": "Use the opposite operator (\"!=\") instead.",
"textRange": {
"endLine": 98,
"endOffset": 13,
"startLine": 98,
"startOffset": 5
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 100,
"message": "Use the opposite operator (\"!=\") instead.",
"textRange": {
"endLine": 100,
"endOffset": 13,
"startLine": 100,
"startOffset": 5
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 113,
"message": "Complete the task associated to this \"TODO\" comment.",
"textRange": {
"endLine": 113,
"endOffset": 63,
"startLine": 113,
"startOffset": 27
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 162,
"message": "Remove the unused local variable \"i\".",
"textRange": {
"endLine": 162,
"endOffset": 6,
"startLine": 162,
"startOffset": 5
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 173,
"message": "Remove this commented out code.",
"textRange": {
"endLine": 173,
"endOffset": 10,
"startLine": 173,
"startOffset": 0
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 181,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 181,
"endOffset": 6,
"startLine": 181,
"startOffset": 1
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 184,
"message": "Remove this commented out code.",
"textRange": {
"endLine": 184,
"endOffset": 19,
"startLine": 184,
"startOffset": 1
},
"type": "CODE_SMELL"
}
] | 10,392 |
algorithms/tree/traversal/__init__.py
|
Heart-throb-Rajnish/algorithms
| 22,426 |
3220141
|
from .preorder import *
from .postorder import *
from .inorder import *
| null | null | null | null | null | null | null | null | null |
[] | 2,144 |
V3/server/cmdline.py
|
webpics/style2paints
| 15,281 |
191101
|
#!/usr/bin/env python3
import os
import sys
for f in ['baby.net', 'head.net', 'neck.net', 'tail.net', 'reader.net', 'girder.net']:
if not os.path.exists(f):
print('Error: neural network models not downloaded!')
print('Download them here, and put to current directory:')
print('https://drive.google.com/open?id=1fWi4wmNj-xr-nCzuWMsN2rcm0249_Aem')
exit(1)
if len(sys.argv) <= 2:
print('Usage: %s <imagefile> <outputfile> [colorization|rendering|recolorization] [x y color x y color ...]' % sys.argv[0])
print('Example: %s sketch.jpg colorized.jpg colorization 0.5 0.25 77ee00 0.5 0.75 0011cc' % sys.argv[0])
exit(1)
from config import *
import re
import cv2
import time
import json
import base64
import shutil
import datetime
import threading
import numpy as np
from ai import *
from tricks import *
sketch_upload_pool = []
painting_pool = []
options_str = '{"alpha":0,"points":[],"lineColor":[0,0,0],"line":false,"hasReference":false}'
def get_request_image(fname):
img = np.fromfile(fname, dtype=np.uint8)
img = cv2.imdecode(img, -1)
return img
def handle_sketch_upload_pool():
room, sketch, method = sketch_upload_pool[0]
del sketch_upload_pool[0]
room_path = 'game/rooms/' + room
print('processing sketch in ' + room_path)
improved_sketch = sketch.copy()
improved_sketch = min_resize(improved_sketch, 512)
improved_sketch = cv_denoise(improved_sketch)
improved_sketch = sensitive(improved_sketch, s=5.0)
improved_sketch = go_tail(improved_sketch)
cv2.imwrite(room_path + '/sketch.improved.jpg', improved_sketch)
color_sketch = improved_sketch.copy()
std = cal_std(color_sketch)
print('std = ' + str(std))
need_de_painting = (std > 100.0) and method == 'rendering'
if method=='recolorization' or need_de_painting:
improved_sketch = go_passline(color_sketch)
improved_sketch = min_k_down_c(improved_sketch, 2)
improved_sketch = cv_denoise(improved_sketch)
improved_sketch = go_tail(improved_sketch)
improved_sketch = sensitive(improved_sketch, s=5.0)
cv2.imwrite(room_path + '/sketch.recolorization.jpg', min_black(improved_sketch))
if need_de_painting:
cv2.imwrite(room_path + '/sketch.de_painting.jpg', min_black(improved_sketch))
print('In rendering mode, the user has uploaded a painting, and I have translated it into a sketch.')
print('sketch lined')
cv2.imwrite(room_path + '/sketch.colorization.jpg', min_black(color_sketch))
cv2.imwrite(room_path + '/sketch.rendering.jpg', eye_black(color_sketch))
print('sketch improved')
def handle_painting_pool():
room, ID, sketch, alpha, reference, points, method, lineColor, line = painting_pool[0]
del painting_pool[0]
room_path = 'game/rooms/' + room
print('processing painting in ' + room_path)
sketch_1024 = k_resize(sketch, 64)
if os.path.exists(room_path + '/sketch.de_painting.jpg') and method == 'rendering':
vice_sketch_1024 = k_resize(cv2.imread(room_path + '/sketch.de_painting.jpg', cv2.IMREAD_GRAYSCALE), 64)
sketch_256 = mini_norm(k_resize(min_k_down(vice_sketch_1024, 2), 16))
sketch_128 = hard_norm(sk_resize(min_k_down(vice_sketch_1024, 4), 32))
else:
sketch_256 = mini_norm(k_resize(min_k_down(sketch_1024, 2), 16))
sketch_128 = hard_norm(sk_resize(min_k_down(sketch_1024, 4), 32))
print('sketch prepared')
if debugging:
cv2.imwrite(room_path + '/sketch.128.jpg', sketch_128)
cv2.imwrite(room_path + '/sketch.256.jpg', sketch_256)
baby = go_baby(sketch_128, opreate_normal_hint(ini_hint(sketch_128), points, type=0, length=1))
baby = de_line(baby, sketch_128)
for _ in range(16):
baby = blur_line(baby, sketch_128)
baby = go_tail(baby)
baby = clip_15(baby)
if debugging:
cv2.imwrite(room_path + '/baby.' + ID + '.jpg', baby)
print('baby born')
composition = go_gird(sketch=sketch_256, latent=d_resize(baby, sketch_256.shape), hint=ini_hint(sketch_256))
if line:
composition = emph_line(composition, d_resize(min_k_down(sketch_1024, 2), composition.shape), lineColor)
composition = go_tail(composition)
cv2.imwrite(room_path + '/composition.' + ID + '.jpg', composition)
print('composition saved')
painting_function = go_head
if method == 'rendering':
painting_function = go_neck
print('method: ' + method)
result = painting_function(
sketch=sketch_1024,
global_hint=k_resize(composition, 14),
local_hint=opreate_normal_hint(ini_hint(sketch_1024), points, type=2, length=2),
global_hint_x=k_resize(reference, 14) if reference is not None else k_resize(composition, 14),
alpha=(1 - alpha) if reference is not None else 1
)
result = go_tail(result)
cv2.imwrite(room_path + '/result.' + ID + '.jpg', result)
if debugging:
cv2.imwrite(room_path + '/icon.' + ID + '.jpg', max_resize(result, 128))
return room_path + '/result.' + ID + '.jpg'
def upload_sketch(inputfilename, method):
ID = datetime.datetime.now().strftime('H%HM%MS%S')
room = datetime.datetime.now().strftime('%b%dH%HM%MS%S') + 'R' + str(np.random.randint(100, 999))
room_path = 'game/rooms/' + room
os.makedirs(room_path, exist_ok=True)
sketch = from_png_to_jpg(get_request_image(inputfilename))
cv2.imwrite(room_path + '/sketch.original.jpg', sketch)
print('original_sketch saved')
print('sketch upload pool get request: ' + method)
sketch_upload_pool.append((room, sketch, method))
return room
def request_result(room, method, points):
ID = datetime.datetime.now().strftime('H%HM%MS%S')
room_path = 'game/rooms/' + room
if debugging:
with open(room_path + '/options.' + ID + '.json', 'w') as f:
f.write(options_str)
options = json.loads(options_str)
sketch = cv2.imread(room_path + '/sketch.' + method + '.jpg', cv2.IMREAD_GRAYSCALE)
alpha = float(options["alpha"])
for _ in range(len(points)):
points[_][1] = 1 - points[_][1]
reference = None
print('request result room = ' + str(room) + ', ID = ' + str(ID))
lineColor = np.array(options["lineColor"])
line = options["line"]
painting_pool.append([room, ID, sketch, alpha, reference, points, method, lineColor, line])
os.makedirs('game/rooms', exist_ok=True)
method = 'colorization' if len(sys.argv) <= 3 else sys.argv[3]
points = []
i = 4
while len(sys.argv) > i:
x = float(sys.argv[i])
y = float(sys.argv[i+1])
h = sys.argv[i+2]
r = int(h[0:2], 16)
g = int(h[2:4], 16)
b = int(h[4:6], 16)
points.append([x, y, r, g, b, 0])
i += 3
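# Editorial example of the hint parsing above (values taken from the usage
# string): the arguments "0.5 0.25 77ee00" become the point
# [0.5, 0.25, 119, 238, 0, 0], i.e. normalised x/y plus the RGB of #77ee00.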
room = upload_sketch(sys.argv[1], method)
handle_sketch_upload_pool()
request_result(room, method, points)
result = handle_painting_pool()
shutil.copyfile(result, sys.argv[2])
|
9
|
0
|
0.0
|
24
|
0
|
9
|
1.0
|
0
|
26
|
[
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 19,
"message": "Import only needed names or import the module and then use its members.",
"textRange": {
"endLine": 19,
"endOffset": 20,
"startLine": 19,
"startOffset": 0
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 31,
"message": "Import only needed names or import the module and then use its members.",
"textRange": {
"endLine": 31,
"endOffset": 16,
"startLine": 31,
"startOffset": 0
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 32,
"message": "Import only needed names or import the module and then use its members.",
"textRange": {
"endLine": 32,
"endOffset": 20,
"startLine": 32,
"startOffset": 0
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 49,
"message": "Define a constant instead of duplicating this literal 'game/rooms/' 4 times.",
"textRange": {
"endLine": 49,
"endOffset": 29,
"startLine": 49,
"startOffset": 16
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 69,
"message": "Define a constant instead of duplicating this literal '/sketch.de_painting.jpg' 3 times.",
"textRange": {
"endLine": 69,
"endOffset": 61,
"startLine": 69,
"startOffset": 36
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 78,
"message": "Rename this local variable \"lineColor\" to match the regular expression ^[_a-z][a-z0-9_]*$.",
"textRange": {
"endLine": 78,
"endOffset": 65,
"startLine": 78,
"startOffset": 56
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 128,
"message": "Remove the unused local variable \"ID\".",
"textRange": {
"endLine": 128,
"endOffset": 6,
"startLine": 128,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
},
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
},
{
"severity": "LOW",
"softwareQuality": "SECURITY"
}
],
"line": 129,
"message": "Use a \"numpy.random.Generator\" here instead of this legacy function.",
"textRange": {
"endLine": 129,
"endOffset": 90,
"startLine": 129,
"startOffset": 73
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 153,
"message": "Rename this local variable \"lineColor\" to match the regular expression ^[_a-z][a-z0-9_]*$.",
"textRange": {
"endLine": 153,
"endOffset": 13,
"startLine": 153,
"startOffset": 4
},
"type": "CODE_SMELL"
}
] | 1,160 |
kivy/modules/__init__.py
|
yunus-ceyhan/kivy
| 13,889 |
3289583
|
'''
Modules
=======
Modules are classes that can be loaded when a Kivy application is starting. The
loading of modules is managed by the config file. Currently, we include:
* :class:`~kivy.modules.touchring`: Draw a circle around each touch.
* :class:`~kivy.modules.monitor`: Add a red topbar that indicates the FPS
and a small graph indicating input activity.
* :class:`~kivy.modules.keybinding`: Bind some keys to actions, such as a
screenshot.
* :class:`~kivy.modules.recorder`: Record and playback a sequence of
events.
* :class:`~kivy.modules.screen`: Emulate the characteristics (dpi/density/
resolution) of different screens.
* :class:`~kivy.modules.inspector`: Examines your widget hierarchy and
widget properties.
* :class:`~kivy.modules.webdebugger`: Realtime examination of your app
internals via a web browser.
* :class:`~kivy.modules.joycursor`: Navigate in your app with a joystick.
* :class:`~kivy.modules.showborder`: Show widget's border.
Modules are automatically loaded from the Kivy path and User path:
* `PATH_TO_KIVY/kivy/modules`
* `HOME/.kivy/mods`
Activating a module
-------------------
There are various ways in which you can activate a kivy module.
Activate a module in the config
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To activate a module this way, you can edit your configuration file (in your
`HOME/.kivy/config.ini`)::
[modules]
# uncomment to activate
touchring =
# monitor =
# keybinding =
Only the name of the module followed by "=" is sufficient to activate the
module.
Activate a module in Python
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Before starting your application, preferably at the start of your import, you
can do something like this::
import kivy
kivy.require('1.0.8')
# Activate the touchring module
from kivy.config import Config
Config.set('modules', 'touchring', '')
Activate a module via the commandline
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When starting your application from the commandline, you can add a
*-m <modulename>* to the arguments. For example::
python main.py -m webdebugger
.. note::
Some modules, such as the screen, may require additional parameters. They
will, however, print these parameters to the console when launched without
them.
Create your own module
----------------------
Create a file in your `HOME/.kivy/mods`, and create 2 functions::
def start(win, ctx):
pass
def stop(win, ctx):
pass
Start/stop are functions that will be called for every window opened in
Kivy. When you are starting a module, you can use these to store and
manage the module state. Use the `ctx` variable as a dictionary. This
context is unique for each instance/start() call of the module, and will
be passed to stop() too.
'''
__all__ = ('Modules', )
from kivy.config import Config
from kivy.logger import Logger
import kivy
import importlib
import os
import sys
class ModuleContext:
'''Context of a module
You can access to the config with self.config.
'''
def __init__(self):
self.config = {}
def __repr__(self):
return repr(self.config)
class ModuleBase:
'''Handle Kivy modules. It will automatically load and instantiate the
module for the general window.'''
def __init__(self, **kwargs):
self.mods = {}
self.wins = []
def add_path(self, path):
'''Add a path to search for modules in'''
if not os.path.exists(path):
return
if path not in sys.path:
sys.path.append(path)
dirs = os.listdir(path)
for module in dirs:
name, ext = os.path.splitext(module)
# accept only python extensions
if ext not in ('.py', '.pyo', '.pyc') or name == '__init__':
continue
self.mods[name] = {
'name': name,
'activated': False,
'context': ModuleContext()}
def list(self):
'''Return the list of available modules'''
return self.mods
def import_module(self, name):
try:
modname = 'kivy.modules.{0}'.format(name)
module = importlib.__import__(name=modname)
module = sys.modules[modname]
except ImportError:
try:
module = importlib.__import__(name=name)
module = sys.modules[name]
except ImportError:
Logger.exception('Modules: unable to import <%s>' % name)
# protect against missing module dependency crash
self.mods[name]['module'] = None
return
# basic check on module
if not hasattr(module, 'start'):
Logger.warning('Modules: Module <%s> missing start() function' %
name)
return
if not hasattr(module, 'stop'):
err = 'Modules: Module <%s> missing stop() function' % name
Logger.warning(err)
return
self.mods[name]['module'] = module
def activate_module(self, name, win):
'''Activate a module on a window'''
if name not in self.mods:
Logger.warning('Modules: Module <%s> not found' % name)
return
mod = self.mods[name]
# ensure the module has been configured
if 'module' not in mod:
self._configure_module(name)
pymod = mod['module']
if not mod['activated']:
context = mod['context']
msg = 'Modules: Start <{0}> with config {1}'.format(
name, context)
Logger.debug(msg)
pymod.start(win, context)
mod['activated'] = True
def deactivate_module(self, name, win):
'''Deactivate a module from a window'''
if name not in self.mods:
Logger.warning('Modules: Module <%s> not found' % name)
return
if 'module' not in self.mods[name]:
return
module = self.mods[name]['module']
if self.mods[name]['activated']:
module.stop(win, self.mods[name]['context'])
self.mods[name]['activated'] = False
def register_window(self, win):
'''Add the window to the window list'''
if win not in self.wins:
self.wins.append(win)
self.update()
def unregister_window(self, win):
'''Remove the window from the window list'''
if win in self.wins:
self.wins.remove(win)
self.update()
def update(self):
'''Update the status of the module for each window'''
modules_to_activate = [x[0] for x in Config.items('modules')]
for win in self.wins:
for name in self.mods:
if name not in modules_to_activate:
self.deactivate_module(name, win)
for name in modules_to_activate:
try:
self.activate_module(name, win)
except:
import traceback
traceback.print_exc()
raise
def configure(self):
'''(internal) Configure all the modules before using them.
'''
modules_to_configure = [x[0] for x in Config.items('modules')]
for name in modules_to_configure:
if name not in self.mods:
Logger.warning('Modules: Module <%s> not found' % name)
continue
self._configure_module(name)
def _configure_module(self, name):
if 'module' not in self.mods[name]:
try:
self.import_module(name)
except ImportError:
return
# convert configuration like:
# -m mjpegserver:port=8080,fps=8
# and pass it in context.config token
config = dict()
args = Config.get('modules', name)
if args != '':
values = Config.get('modules', name).split(',')
for value in values:
x = value.split('=', 1)
if len(x) == 1:
config[x[0]] = True
else:
config[x[0]] = x[1]
self.mods[name]['context'].config = config
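        # Illustrative note (editorial): an entry such as "port=8080,fps=8"
        # ends up here as {'port': '8080', 'fps': '8'}, while a bare token
        # such as "debug" becomes {'debug': True}.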
        # call configure if module has one
if hasattr(self.mods[name]['module'], 'configure'):
self.mods[name]['module'].configure(config)
def usage_list(self):
print('Available modules')
print('=================')
for module in sorted(self.list()):
if 'module' not in self.mods[module]:
self.import_module(module)
# ignore modules without docstring
if not self.mods[module]['module'].__doc__:
continue
text = self.mods[module]['module'].__doc__.strip("\n ")
text = text.split('\n')
# make sure we don't get IndexError along the way
# then pretty format the header
if len(text) > 2:
if text[1].startswith('='):
# '\n%-12s: %s' -> 12 spaces + ": "
text[1] = '=' * (14 + len(text[1]))
text = '\n'.join(text)
print('\n%-12s: %s' % (module, text))
Modules = ModuleBase()
Modules.add_path(kivy.kivy_modules_dir)
if 'KIVY_DOC' not in os.environ:
Modules.add_path(kivy.kivy_usermodules_dir)
if __name__ == '__main__':
print(Modules.list())
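# Hedged sketch (added for illustration, not part of the original file): the
# smallest module file that satisfies the start()/stop() checks performed by
# import_module() above. The file name and log messages are assumptions.
#
#     # mymodule.py -- drop into a directory passed to Modules.add_path()
#     from kivy.logger import Logger
#
#     def start(win, ctx):
#         Logger.info('mymodule: started with config %s' % ctx.config)
#
#     def stop(win, ctx):
#         Logger.info('mymodule: stopped')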
|
1
|
0
|
0.0
|
56
|
0
|
1
|
1.0
|
0
|
47
|
[
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 175,
"message": "Define a constant instead of duplicating this literal 'Modules: Module <%s> not found' 3 times.",
"textRange": {
"endLine": 175,
"endOffset": 59,
"startLine": 175,
"startOffset": 27
},
"type": "CODE_SMELL"
}
] | 2,574 |
recipes/Python/360698_Extending_pythprolog_syntax/recipe-360698.py
|
tdiprima/code
| 2,023 |
3274523
|
<reponame>tdiprima/code
#
# pythologic2.py
#
# Add logic programming (Prolog) syntax and *resolution* into Python.
#
# (c) 2004 <NAME>
# after (c) 2004 <NAME>
# and AIMA examples
#
import string
import copy
class Struct:
def __init__(self, database, head, subs):
"""
The head and subs are essential - what makes this struct.
The database should only be used while structs are constructed,
and later removed.
"""
self.database = database
self.head = head
self.subs = subs
def __pos__(self):
"""
unary + means insert into database as fact
"""
self.database.add_fact(self)
def __invert__(self):
"""
unary ~ means insert into database as query
"""
self.database.add_query(self)
def __lshift__(self, requisites):
"""
The ideal is
consequent(args) << cond1(args1),...
for now we must do with
consequent(args) << [cond1(args1),...]
"""
self.database.add_conditional(self, requisites)
def __str__(self):
subs = map (str, self.subs)
return str(self.head) + "(" + string.join(subs,',') + ")"
class Symbol:
def __init__ (self, name, database):
self.name = name
self.database = database
def __call__(self, *args):
return Struct(self.database, self, args)
def __str__(self):
return self.name
class Constant(Symbol):
"""
A constant is a name. Its value is its name too.
"""
def value(self): return self.name
class Variable(Symbol):
pass
def symbol(name, database):
if (name[0] in string.uppercase):
return Variable(name,database)
else:
return Constant(name, database)
class Database:
def __init__(self, name):
self.name= name
self.facts = []
self.conditionals = []
self.queries = []
def add_fact(self, fact):
self.facts.append(fact)
def add_query(self, query):
self.queries.append(query)
def add_conditional(self,head,requisites):
if not(isinstance(requisites, list)):
requisites = [requisites]
self.conditionals.append((head,requisites))
def __str__(self):
factsStr= string.join(map(str, self.facts),'\n')
condsStr= ''
for (h,r) in self.conditionals:
condsStr = condsStr + "%s << %s\n"%(h,string.join( map(str, r), ', '))
queryStr= string.join( map(str, self.queries),'\n')
return self.name + ' facts\n' + factsStr +'\n'+self.name + ' conditionals\n'+ condsStr + '\n'+self.name + ' queries\n'+queryStr + '\n'
def append(self, func):
"""
Include definitions from func into database
"""
try:
code = func.func_code
except:
raise TypeError, "function or method argument expected"
names = code.co_names
locally_defined = code.co_varnames
globally_defined = func.func_globals.keys()
defined = locally_defined+tuple(globally_defined)
undefined = [name for name in names if name not in defined]
newglobals = func.func_globals.copy()
for name in undefined:
newglobals[name] = symbol(name, self)
exec code in newglobals
def __lshift__(self, func):
"""
A helper for decorator implementation
"""
self.append(func)
return LogicalFunction(self, func)
def solve(self, V = [{}]):
"""
The query queue is LIFO:
Extend valuations in V satisfying the last query.
"""
def solve1( v ):
# get solutions from facts
unify_facts = [unify(query, fact, v) for fact in self.facts]
# look for solutions from conditionals
unify_conditionals = []
for ( header , condition_list ) in self.conditionals:
u = unify(query, header , v) # unify headers
U = [ u ]
if u != None:
# remember query queue
oldQueries = copy.deepcopy(self.queries)
# we want to start by the first conditional
D = copy.copy( condition_list )
D.reverse()
# phase 1: append the conditionals to query queue
for condition in D:
if type( condition ) == type('string'):
# process python code
# should return True or False
self.queries.append( condition )
#eval_python_string( condition , u)
else:
# append the conditional,
# with variables replaced according to u
# to the query queue
unified_condition = subst(u, condition )
self.queries.append( unified_condition )
# phase 2: solve the appended conditionals
for condition in D:
U = self.solve( U )
# restore query queue
self.queries = oldQueries
# grow the list of solutions
unify_conditionals = unify_conditionals + U
return [ u for u in (unify_facts + unify_conditionals) if not u in [None, {}] ]
if self.queries:
query = self.queries[-1]
del self.queries[-1]
else:
return []
if type( query ) == type( 'string' ):
U = [ v for v in V if python_eval_string(query, v) ]
else:
U = []
for v in V:
U = U + solve1(v)
return U
def python_eval_string(s, v):
for k in v:
s=string.replace(s, str(k), str(v[k]))
return eval( s, {} )
def subst(v, x):
if v.has_key(x):
return v[x]
elif isinstance(x, Variable):
return x
elif isinstance(x, Struct):
return Struct( x.database, x.head, [subst(v, xi) for xi in x.subs])
def unify(x,y,v={}):
"""
Find one valuation extending v and unifying x with y
"""
def extend(v, x, t):
"""
Extend valuation v with v[x] = t
"""
v1 = copy.copy(v)
v1[x] = t
return v1
def occur_check(x, t):
"""
        Test if the variable x occurs in structure t
"""
if x == t:
return True
elif isinstance(t, Struct):
return t.head == x.head or occur_check(x, t.subs)
return False
def unify_var(x, t, v):
"""
Test if v can be extended with v[x] = t;
        In that case return the extension
Else return None
"""
if x in v:
return unify( v[ x ], t, v)
elif occur_check(x, t):
return None
else:
return extend(v, x, t)
if v == None:
return None
elif x == y:
return v
elif isinstance(x, Variable):
return unify_var(x, y, v)
elif isinstance(y, Variable):
return unify_var(y, x, v)
elif isinstance(x, Struct) and isinstance(y, Struct) and (x.head == y.head):
z = v
n = len(x.subs)
m = len(y.subs)
if n == m:
for i in range( n ):
z = unify( x.subs[i], y.subs[i], z)
return z
else:
return None
else:
return None
class LogicalFunction:
"""
This class replaces a logical function once it has
been consulted, to avoid erroneous use
"""
def __init__(self, database, func):
self.database=database
self.logical_function=func
def __call__(self):
raise TypeError, "Logical functions are not really callable"
if __name__ == "__main__":
db = Database('TEST')
print "Defining a prolog program... ",
def prolog_func():
# prolog facts are prefixed with "+"
+ number(0)
+ number(1)
+ number(2)
+ number(3)
+ number(4)
# prolog conditionals have the pattern p << [q1, ..., qn]
test(X, Y) << [number(X), number(Y), 'X==2*Y' ]
# prolog queries are prefixed with "~"
~ test(X, Y)
# Update the database
db << prolog_func
print "done"
print "Before solving"
print db
# Solve the queries
x = db.solve()
print 'Solutions'
for v in x:
for k in v: print k,"=", v[k],' ',
print
print "After solving"
print db
|
25
|
6
|
0.0
|
62
|
0
|
19
|
1.0
|
0
|
56
|
[
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 96,
"message": "Rename this local variable \"factsStr\" to match the regular expression ^[_a-z][a-z0-9_]*$.",
"textRange": {
"endLine": 96,
"endOffset": 16,
"startLine": 96,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 97,
"message": "Rename this local variable \"condsStr\" to match the regular expression ^[_a-z][a-z0-9_]*$.",
"textRange": {
"endLine": 97,
"endOffset": 16,
"startLine": 97,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 100,
"message": "Rename this local variable \"queryStr\" to match the regular expression ^[_a-z][a-z0-9_]*$.",
"textRange": {
"endLine": 100,
"endOffset": 16,
"startLine": 100,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 109,
"message": "Specify an exception class to catch or reraise the exception",
"textRange": {
"endLine": 109,
"endOffset": 14,
"startLine": 109,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 119,
"message": "Do not use exec statement.",
"textRange": {
"endLine": 119,
"endOffset": 12,
"startLine": 119,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 128,
"message": "Rename this parameter \"V\" to match the regular expression ^[_a-z][a-z0-9_]*$.",
"textRange": {
"endLine": 128,
"endOffset": 21,
"startLine": 128,
"startOffset": 20
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 128,
"message": "Refactor this function to reduce its Cognitive Complexity from 25 to the 15 allowed.",
"textRange": {
"endLine": 128,
"endOffset": 13,
"startLine": 128,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 145,
"message": "Rename this local variable \"oldQueries\" to match the regular expression ^[_a-z][a-z0-9_]*$.",
"textRange": {
"endLine": 145,
"endOffset": 30,
"startLine": 145,
"startOffset": 20
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 174,
"message": "Use the opposite operator (\"not in\") instead.",
"textRange": {
"endLine": 174,
"endOffset": 89,
"startLine": 174,
"startOffset": 70
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 205,
"message": "Refactor this function to reduce its Cognitive Complexity from 21 to the 15 allowed.",
"textRange": {
"endLine": 205,
"endOffset": 9,
"startLine": 205,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 278,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 278,
"endOffset": 9,
"startLine": 278,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 283,
"message": "Remove or refactor this statement; it has no side effects.",
"textRange": {
"endLine": 283,
"endOffset": 19,
"startLine": 283,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 284,
"message": "Remove or refactor this statement; it has no side effects.",
"textRange": {
"endLine": 284,
"endOffset": 19,
"startLine": 284,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 285,
"message": "Remove or refactor this statement; it has no side effects.",
"textRange": {
"endLine": 285,
"endOffset": 19,
"startLine": 285,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 286,
"message": "Remove or refactor this statement; it has no side effects.",
"textRange": {
"endLine": 286,
"endOffset": 19,
"startLine": 286,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 287,
"message": "Remove or refactor this statement; it has no side effects.",
"textRange": {
"endLine": 287,
"endOffset": 19,
"startLine": 287,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 293,
"message": "Remove or refactor this statement; it has no side effects.",
"textRange": {
"endLine": 293,
"endOffset": 20,
"startLine": 293,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 297,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 297,
"endOffset": 9,
"startLine": 297,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 299,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 299,
"endOffset": 9,
"startLine": 299,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 300,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 300,
"endOffset": 9,
"startLine": 300,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 304,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 304,
"endOffset": 9,
"startLine": 304,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 306,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 306,
"endOffset": 25,
"startLine": 306,
"startOffset": 20
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 307,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 307,
"endOffset": 13,
"startLine": 307,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 309,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 309,
"endOffset": 9,
"startLine": 309,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 310,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 310,
"endOffset": 9,
"startLine": 310,
"startOffset": 4
},
"type": "CODE_SMELL"
}
] | 2,456 |
autoregressive_diffusion/experiments/audio/arch/diff_wave.py
|
xxdreck/google-research
| 23,901 |
78267
|
<filename>autoregressive_diffusion/experiments/audio/arch/diff_wave.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DiffWave architecture.
Ported from PyTorch to JAX from
https://github.com/philsyn/DiffWave-unconditional/blob/master/WaveNet.py
"""
from typing import Any, Callable, Iterable, Optional, Tuple
from flax import linen as nn
import jax
from jax import numpy as jnp
import numpy as np
from autoregressive_diffusion.model.architecture_components import input_embedding
from autoregressive_diffusion.model.architecture_components import layers
Array = jnp.ndarray
Shape = Iterable[int]
Dtype = Any
PRNGKey = Array
InitializerFn = Callable[[PRNGKey, Shape, Dtype], Array]
class ResBlock(nn.Module):
"""Step-conditioned Residual block."""
features: int
kernel_size: Tuple[int] = (3,)
kernel_dilation: Tuple[int] = (1,)
skip_features: Optional[int] = None
kernel_init: InitializerFn = nn.initializers.kaiming_normal()
activation: Callable[[Array], Array] = jax.nn.swish
is_causal: bool = False
@nn.compact
def __call__(self, x, t_embed):
"""Apply the residual block.
Args:
x: Inputs of shape [batch, <spatial>, features].
t_embed: Embedded time steps of shape [batch, dim].
Returns:
Mapped inputs of shape [batch, <spatial>, features] for the output and
skip connections.
"""
in_features = x.shape[-1]
if in_features != self.features:
raise ValueError(
          f'DiffWave ResBlock requires the same number of input ({in_features}) '
f'and output ({self.features}) features.')
h = x
if t_embed is not None:
# Project time step embedding.
t_embed = nn.Dense(
in_features,
name='step_proj')(
self.activation(t_embed))
# Reshape to [batch, 1, ..., 1, in_features] for broadcast.
t_embed = jnp.reshape(
t_embed,
(-1,) + (1,) * len(self.kernel_size) + (in_features,))
h += t_embed
# Dilated gated conv.
u = layers.CausalConv(
self.features,
self.kernel_size,
kernel_dilation=self.kernel_dilation,
kernel_init=self.kernel_init,
padding='VALID' if self.is_causal else 'SAME',
is_causal=self.is_causal,
name='dilated_tanh')(
h)
v = layers.CausalConv(
self.features,
self.kernel_size,
kernel_dilation=self.kernel_dilation,
kernel_init=self.kernel_init,
padding='VALID' if self.is_causal else 'SAME',
is_causal=self.is_causal,
name='dilated_sigmoid')(
h)
y = jax.nn.tanh(u) * jax.nn.sigmoid(v)
# Residual and skip convs.
residual = nn.Conv(
self.features,
(1,) * len(self.kernel_size),
kernel_init=self.kernel_init,
name='residual')(
y)
skip = nn.Conv(
self.skip_features or self.features,
(1,) * len(self.kernel_size),
kernel_init=self.kernel_init,
name='skip')(
y)
return (x + residual) / np.sqrt(2.), skip
class ResGroup(nn.Module):
"""Residual group with skip connection aggregation and dilation cycling.
Attributes:
num_blocks: Number of residual blocks.
features: Number of ResBlock features.
skip_features: Number of ResBlock skip connection features.
kernel_size: Kernel size for ResBlock-s.
kernel_init: Convolutional kernel initializer.
dilation_cycle: Dilation cycling length.
is_causal: Whether to use a causal architecture.
"""
num_blocks: int
features: int
skip_features: Optional[int] = None
kernel_size: Tuple[int] = (3,)
kernel_init: InitializerFn = nn.initializers.kaiming_normal()
dilation_cycle: int = 12 # Max dilation is 2 ** 11 = 2048.
is_causal: bool = False
@nn.compact
def __call__(self, x, t_embed):
"""Apply a residual group.
Args:
x: Inputs of shape [batch, <spatial>, features].
t_embed: Embedded time steps of shape [batch, dim].
Returns:
Mapped inputs of shape [batch, <spatial>, skip_features]
"""
y = 0.
for i in range(self.num_blocks):
x, skip = ResBlock(
features=self.features,
skip_features=self.skip_features,
kernel_size=self.kernel_size,
kernel_dilation=(2 ** (i % self.dilation_cycle),),
kernel_init=self.kernel_init,
is_causal=self.is_causal)(
x, t_embed)
y += skip
y /= np.sqrt(self.num_blocks)
return y
class DiffWave(nn.Module):
"""DiffWave network architecture.
Attributes:
num_blocks: Number of residual blocks.
features: Number of ResBlock features.
max_time: Number of generation steps (i.e. data dimensionality).
num_classes: Number of output classes.
output_features: Number of output features.
skip_features: Number of ResBlock skip connection features.
kernel_size: Kernel size for ResBlock-s.
kernel_init: Convolutional kernel initializer.
dilation_cycle: ResGroup dilation cycling length.
is_causal: Whether to use the causal architecture.
"""
num_blocks: int
features: int
max_time: int
num_classes: int
output_features: Optional[int] = 1
skip_features: Optional[int] = None
kernel_size: Tuple[int] = (3,)
kernel_init: InitializerFn = nn.initializers.kaiming_normal()
dilation_cycle: int = 12
is_causal: bool = False
@nn.compact
def __call__(self, x, t, mask, train,
context = None):
"""Apply the WaveDiff network.
Args:
x: Inputs of shape [batch, <spatial>, features].
t: Time steps of shape [batch].
mask: Array of the same shape as `x` giving the auto-regressive mask.
      train: If True, the model is run in training mode. *Not* used in this
architecture.
context: Unused.
Returns:
Mapped inputs of shape [batch, <spatial>, skip_features]
"""
assert context is None
# Sinusoidal features + MLP for time step embedding.
# Note: this differs from the DiffWave embedding in several ways:
# * Time embeddings have different dimensionality: 128-512-512
# vs 256-1024-1024.
    #  * First convolution has kernel size 3 instead of 1.
h, t_embed = input_embedding.InputProcessingAudio(
num_classes=self.num_classes,
num_channels=self.features,
max_time=self.max_time,
is_causal=self.is_causal)(
x, t, mask, train)
del x, t, mask
h = nn.relu(h)
h = ResGroup(
num_blocks=self.num_blocks,
features=self.features,
skip_features=self.skip_features,
kernel_size=self.kernel_size,
dilation_cycle=self.dilation_cycle,
kernel_init=self.kernel_init,
is_causal=self.is_causal,
name='res_group')(
h, t_embed)
# Final convolution.
h = nn.Conv(
features=self.skip_features or self.features,
kernel_size=(1,) * len(self.kernel_size),
kernel_init=self.kernel_init,
name='flower_conv')(
h)
h = nn.relu(h)
if self.output_features:
h = nn.Conv(
features=self.output_features,
kernel_size=(1,) * len(self.kernel_size),
kernel_init=nn.initializers.zeros,
name='class_conv')(
h)
return h
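# Hedged usage sketch (added for illustration; not from the original file).
# Exact input shapes and dtypes depend on input_embedding.InputProcessingAudio,
# so the values below are assumptions rather than a real experiment config:
#
#     model = DiffWave(num_blocks=4, features=8, max_time=16, num_classes=256)
#     x = jnp.zeros((2, 16, 1))               # [batch, length, channels]
#     t = jnp.zeros((2,), dtype=jnp.int32)    # generation step per example
#     mask = jnp.ones((2, 16, 1))             # auto-regressive mask, like x
#     params = model.init(jax.random.PRNGKey(0), x, t, mask, train=False)
#     y = model.apply(params, x, t, mask, train=False)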
|
0
|
0
|
0.0
|
8
|
0
|
0
|
1.0
|
0
|
11
|
[] | 498 |
rllib/models/tf/__init__.py
|
firebolt55439/ray
| 21,382 |
11324006
|
<gh_stars>1000+
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.fcnet import FullyConnectedNetwork
from ray.rllib.models.tf.recurrent_net import RecurrentNetwork
from ray.rllib.models.tf.visionnet import VisionNetwork
__all__ = [
"FullyConnectedNetwork",
"RecurrentNetwork",
"TFModelV2",
"VisionNetwork",
]
| null | null | null | null | null | null | null | null | null |
[] | 7,001 |
tests/components/select/test_significant_change.py
|
MrDelik/core
| 30,023 |
3272003
|
<reponame>MrDelik/core
"""Test the select significant change platform."""
from homeassistant.components.select.significant_change import (
async_check_significant_change,
)
from homeassistant.core import HomeAssistant
async def test_significant_change(hass: HomeAssistant) -> None:
"""Detect select significant change."""
attrs1 = {"options": ["option1", "option2"]}
attrs2 = {"options": ["option1", "option2", "option3"]}
assert not async_check_significant_change(
hass, "option1", attrs1, "option1", attrs1
)
assert not async_check_significant_change(
hass, "option1", attrs1, "option1", attrs2
)
assert async_check_significant_change(hass, "option1", attrs1, "option2", attrs1)
assert async_check_significant_change(hass, "option1", attrs1, "option2", attrs2)
|
0
|
0
|
0.0
|
0
|
0
|
0
|
1.0
|
0
|
1
|
[] | 2,447 |
tests/logging/test_remote_handler.py
|
lukaslihotzki/synapse
| 9,945 |
1019328
|
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.test.proto_helpers import AccumulatingProtocol
from synapse.logging import RemoteHandler
from tests.logging import LoggerCleanupMixin
from tests.server import FakeTransport, get_clock
from tests.unittest import TestCase
def connect_logging_client(reactor, client_id):
# This is essentially tests.server.connect_client, but disabling autoflush on
# the client transport. This is necessary to avoid an infinite loop due to
# sending of data via the logging transport causing additional logs to be
# written.
factory = reactor.tcpClients.pop(client_id)[2]
client = factory.buildProtocol(None)
server = AccumulatingProtocol()
server.makeConnection(FakeTransport(client, reactor))
client.makeConnection(FakeTransport(server, reactor, autoflush=False))
return client, server
class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase):
def setUp(self):
self.reactor, _ = get_clock()
def test_log_output(self):
"""
The remote handler delivers logs over TCP.
"""
handler = RemoteHandler("127.0.0.1", 9000, _reactor=self.reactor)
logger = self.get_logger(handler)
logger.info("Hello there, %s!", "wally")
# Trigger the connection
client, server = connect_logging_client(self.reactor, 0)
# Trigger data being sent
client.transport.flush()
# One log message, with a single trailing newline
logs = server.data.decode("utf8").splitlines()
self.assertEqual(len(logs), 1)
self.assertEqual(server.data.count(b"\n"), 1)
# Ensure the data passed through properly.
self.assertEqual(logs[0], "Hello there, wally!")
def test_log_backpressure_debug(self):
"""
When backpressure is hit, DEBUG logs will be shed.
"""
handler = RemoteHandler(
"127.0.0.1", 9000, maximum_buffer=10, _reactor=self.reactor
)
logger = self.get_logger(handler)
# Send some debug messages
for i in range(0, 3):
logger.debug("debug %s" % (i,))
# Send a bunch of useful messages
for i in range(0, 7):
logger.info("info %s" % (i,))
# The last debug message pushes it past the maximum buffer
logger.debug("too much debug")
# Allow the reconnection
client, server = connect_logging_client(self.reactor, 0)
client.transport.flush()
# Only the 7 infos made it through, the debugs were elided
logs = server.data.splitlines()
self.assertEqual(len(logs), 7)
self.assertNotIn(b"debug", server.data)
def test_log_backpressure_info(self):
"""
When backpressure is hit, DEBUG and INFO logs will be shed.
"""
handler = RemoteHandler(
"127.0.0.1", 9000, maximum_buffer=10, _reactor=self.reactor
)
logger = self.get_logger(handler)
# Send some debug messages
for i in range(0, 3):
logger.debug("debug %s" % (i,))
# Send a bunch of useful messages
for i in range(0, 10):
logger.warning("warn %s" % (i,))
# Send a bunch of info messages
for i in range(0, 3):
logger.info("info %s" % (i,))
# The last debug message pushes it past the maximum buffer
logger.debug("too much debug")
# Allow the reconnection
client, server = connect_logging_client(self.reactor, 0)
client.transport.flush()
# The 10 warnings made it through, the debugs and infos were elided
logs = server.data.splitlines()
self.assertEqual(len(logs), 10)
self.assertNotIn(b"debug", server.data)
self.assertNotIn(b"info", server.data)
def test_log_backpressure_cut_middle(self):
"""
        When backpressure is hit and no more DEBUG and INFO logs can be culled,
it will cut the middle messages out.
"""
handler = RemoteHandler(
"127.0.0.1", 9000, maximum_buffer=10, _reactor=self.reactor
)
logger = self.get_logger(handler)
# Send a bunch of useful messages
for i in range(0, 20):
logger.warning("warn %s" % (i,))
# Allow the reconnection
client, server = connect_logging_client(self.reactor, 0)
client.transport.flush()
        # The first five and last five warnings made it through; the middle
        # warnings were cut out
logs = server.data.decode("utf8").splitlines()
self.assertEqual(
["warn %s" % (i,) for i in range(5)]
+ ["warn %s" % (i,) for i in range(15, 20)],
logs,
)
def test_cancel_connection(self):
"""
Gracefully handle the connection being cancelled.
"""
handler = RemoteHandler(
"127.0.0.1", 9000, maximum_buffer=10, _reactor=self.reactor
)
logger = self.get_logger(handler)
# Send a message.
logger.info("Hello there, %s!", "wally")
# Do not accept the connection and shutdown. This causes the pending
# connection to be cancelled (and should not raise any exceptions).
handler.close()
| null | null | null | null | null | null | null | null | null |
[] | 13,205 |
zerver/management/commands/add_scim_client.py
|
dumpmemory/zulip
| 17,004 |
6485693
|
<reponame>dumpmemory/zulip<gh_stars>1000+
from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import ZulipBaseCommand
from zerver.models import SCIMClient
class Command(ZulipBaseCommand):
help = """Create a SCIM client entry in the database."""
def add_arguments(self, parser: ArgumentParser) -> None:
self.add_realm_args(parser)
parser.add_argument("name", help="name of the client")
def handle(self, *args: Any, **options: Any) -> None:
client_name = options["name"]
realm = self.get_realm(options)
assert realm
SCIMClient.objects.create(realm=realm, name=client_name)
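# Hedged usage sketch (assumption, not from the original file): as a Zulip
# management command this would typically be run from the CLI, for example
#
#     ./manage.py add_scim_client my_scim_client -r zulip
#
# where the -r/--realm option is the one added by add_realm_args() above.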
|
0
|
0
|
0.0
|
0
|
0
|
0
|
1.0
|
0
|
2
|
[] | 4,227 |
crabageprediction/venv/Lib/site-packages/numpy/lib/tests/test_financial_expired.py
|
13rianlucero/CrabAgePrediction
| 20,453 |
1956008
|
import sys
import pytest
import numpy as np
@pytest.mark.skipif(sys.version_info[:2] < (3, 7),
reason="requires python 3.7 or higher")
def test_financial_expired():
match = 'NEP 32'
with pytest.warns(DeprecationWarning, match=match):
func = np.fv
with pytest.raises(RuntimeError, match=match):
func(1, 2, 3)
| null | null | null | null | null | null | null | null | null |
[] | 8,799 |
eeg_modelling/eeg_viewer/lookup_test.py
|
deepneuralmachine/google-research
| 23,901 |
3363429
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
import re
from absl.testing import absltest
from eeg_modelling.eeg_viewer import lookup
CHANNEL_KEYS = ['eeg_channel/EEG feat_1-REF/samples',
'eeg_channel/EEG feat_2/samples']
CHANNEL_MATCHERS = [
re.compile(r'eeg_channel/EEG (\w+)(-\w+)*/samples'),
re.compile(r'eeg_channel/POL (EKG\w+)/samples'),
re.compile(r'eeg_channel/(\w+)/samples'),
re.compile(r'eeg_channel/EEG (\w+)(-\w+)*/resampled_samples'),
re.compile(r'(seizure_bin)ary_per_sec'),
]
class LookupTest(absltest.TestCase):
def setUp(self):
super(LookupTest, self).setUp()
self.test_lookup = lookup.Lookup(CHANNEL_KEYS, CHANNEL_MATCHERS)
def testGetKeyFromIndex(self):
self.assertEqual('eeg_channel/EEG feat_1-REF/samples',
self.test_lookup.GetKeyFromIndex(0))
self.assertEqual('eeg_channel/EEG feat_2/samples',
self.test_lookup.GetKeyFromIndex(1))
def testGetKeyFromIndexReturnsNone(self):
self.assertIsNone(self.test_lookup.GetKeyFromIndex(3))
def testGetIndexFromShorthand(self):
self.assertEqual('0', self.test_lookup.GetIndexFromShorthand('FEAT_1'))
self.assertEqual('1', self.test_lookup.GetIndexFromShorthand('FEAT_2'))
def testGetIndexFromShorthandReturnsNone(self):
self.assertIsNone(self.test_lookup.GetIndexFromShorthand('FEAT_3'))
def testGetShorthandFromKey(self):
self.assertEqual('FEAT_1', self.test_lookup.GetShorthandFromKey(
'eeg_channel/EEG feat_1-REF/samples'))
self.assertEqual('FEAT_2', self.test_lookup.GetShorthandFromKey(
'eeg_channel/EEG feat_2/samples'))
def testGetShorthandFromKeyReturnsNone(self):
self.assertIsNone(self.test_lookup.GetShorthandFromKey(
'eeg_channel/EEG feat_3/samples'))
if __name__ == '__main__':
absltest.main()
| null | null | null | null | null | null | null | null | null |
[] | 2,819 |
html/semantics/forms/form-submission-0/resources/file-submission.py
|
meyerweb/wpt
| 14,668 |
9618385
|
<filename>html/semantics/forms/form-submission-0/resources/file-submission.py
import json
from wptserve.utils import isomorphic_decode
def main(request, response):
headers = [(b"Content-Type", b"text/html")]
testinput = request.POST.first(b"testinput")
value = isomorphic_decode(testinput.value)
body = u"<script>parent.postMessage(" + json.dumps(value) + u", '*');</script>"
return headers, body
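# Hedged sketch (assumption, not part of the original test resources): a form
# that exercises this handler would POST a field named "testinput", e.g.
#
#     <form action="resources/file-submission.py" method="post"
#           enctype="multipart/form-data" target="frame">
#       <input type="file" name="testinput">
#     </form>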
|
1
|
0
|
0.0
|
0
|
0
|
1
|
1.0
|
0
|
1
|
[
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 5,
"message": "Remove the unused function parameter \"response\".",
"textRange": {
"endLine": 5,
"endOffset": 26,
"startLine": 5,
"startOffset": 18
},
"type": "CODE_SMELL"
}
] | 5,595 |
tests/validation/tests/v3_api/test_bkp_restore_s3_with_creds.py
|
ursinnDev/rancher_rancher
| 18,697 |
10141581
|
<filename>tests/validation/tests/v3_api/test_bkp_restore_s3_with_creds.py
import pytest
from .common import * # NOQA
from .test_rke_cluster_provisioning import rke_config, validate_rke_dm_host_2,\
node_template_linode
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None,
"nodes": []}
backup_info = {"backupname": None, "backup_id": None, "workload": None,
"backupfilename": None, "etcdbackupdata": None}
@if_test_all_snapshot
def test_bkp_restore_s3_with_creds_create():
validate_backup_create(namespace, backup_info, "s3")
@if_test_all_snapshot
def test_bkp_restore_s3_with_creds_restore():
ns, binfo = validate_backup_create(namespace, backup_info, "s3")
validate_backup_restore(ns, binfo)
@if_test_all_snapshot
def test_bkp_restore_s3_with_creds_delete():
ns, binfo = validate_backup_create(namespace, backup_info, "s3")
ns, binfo = validate_backup_restore(ns, binfo)
validate_backup_delete(ns, binfo, "s3")
@pytest.fixture(scope='module', autouse="True")
def create_project_client_and_cluster_s3_with_creds(node_template_linode,
request):
rke_config["services"]["etcd"]["backupConfig"] = {
"enabled": "true",
"intervalHours": 12,
"retention": 6,
"type": "backupConfig",
"s3BackupConfig": {
"type": "s3BackupConfig",
"accessKey": AWS_ACCESS_KEY_ID,
"secretKey": AWS_SECRET_ACCESS_KEY,
"bucketName": AWS_S3_BUCKET_NAME,
"folder": AWS_S3_BUCKET_FOLDER_NAME,
"region": AWS_REGION,
"endpoint": "s3.amazonaws.com"
}
}
cluster_name = random_name()
validate_rke_dm_host_2(node_template_linode,
rke_config, False, cluster_name)
client = get_user_client()
cluster = get_cluster_by_name(client, cluster_name)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testnoiam")
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["c_client"] = c_client
def fin():
client = get_user_client()
cluster_cleanup(client, cluster)
request.addfinalizer(fin)
|
1
|
0
|
0.0
|
0
|
0
|
1
|
1.0
|
0
|
5
|
[
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 2,
"message": "Import only needed names or import the module and then use its members.",
"textRange": {
"endLine": 2,
"endOffset": 21,
"startLine": 2,
"startOffset": 0
},
"type": "CODE_SMELL"
}
] | 11,760 |
sympy/matrices/decompositions.py
|
nashalex/sympy
| 8,323 |
9778615
|
import copy
from sympy.core import S
from sympy.core.function import expand_mul
from sympy.functions.elementary.miscellaneous import Min, sqrt
from sympy.functions.elementary.complexes import sign
from .common import NonSquareMatrixError, NonPositiveDefiniteMatrixError
from .utilities import _get_intermediate_simp, _iszero
from .determinant import _find_reasonable_pivot_naive
def _rank_decomposition(M, iszerofunc=_iszero, simplify=False):
r"""Returns a pair of matrices (`C`, `F`) with matching rank
such that `A = C F`.
Parameters
==========
iszerofunc : Function, optional
A function used for detecting whether an element can
act as a pivot. ``lambda x: x.is_zero`` is used by default.
simplify : Bool or Function, optional
A function used to simplify elements when looking for a
pivot. By default SymPy's ``simplify`` is used.
Returns
=======
(C, F) : Matrices
`C` and `F` are full-rank matrices with rank as same as `A`,
whose product gives `A`.
See Notes for additional mathematical details.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([
... [1, 3, 1, 4],
... [2, 7, 3, 9],
... [1, 5, 3, 1],
... [1, 2, 0, 8]
... ])
>>> C, F = A.rank_decomposition()
>>> C
Matrix([
[1, 3, 4],
[2, 7, 9],
[1, 5, 1],
[1, 2, 8]])
>>> F
Matrix([
[1, 0, -2, 0],
[0, 1, 1, 0],
[0, 0, 0, 1]])
>>> C * F == A
True
Notes
=====
Obtaining `F`, an RREF of `A`, is equivalent to creating a
product
.. math::
E_n E_{n-1} ... E_1 A = F
where `E_n, E_{n-1}, \dots, E_1` are the elimination matrices or
permutation matrices equivalent to each row-reduction step.
The inverse of the same product of elimination matrices gives
`C`:
.. math::
C = \left(E_n E_{n-1} \dots E_1\right)^{-1}
It is not necessary, however, to actually compute the inverse:
the columns of `C` are those from the original matrix with the
same column indices as the indices of the pivot columns of `F`.
References
==========
.. [1] https://en.wikipedia.org/wiki/Rank_factorization
.. [2] <NAME>.; <NAME>. (1 June 1999).
"Full Rank Factorization of Matrices".
Mathematics Magazine. 72 (3): 193. doi:10.2307/2690882
See Also
========
sympy.matrices.matrices.MatrixReductions.rref
"""
F, pivot_cols = M.rref(simplify=simplify, iszerofunc=iszerofunc,
pivots=True)
rank = len(pivot_cols)
C = M.extract(range(M.rows), pivot_cols)
F = F[:rank, :]
return C, F
def _liupc(M):
"""Liu's algorithm, for pre-determination of the Elimination Tree of
the given matrix, used in row-based symbolic Cholesky factorization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.liupc()
([[0], [], [0], [1, 2]], [4, 3, 4, 4])
References
==========
.. [1] Symbolic Sparse Cholesky Factorization using Elimination Trees,
<NAME> (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
"""
# Algorithm 2.4, p 17 of reference
# get the indices of the elements that are non-zero on or below diag
R = [[] for r in range(M.rows)]
for r, c, _ in M.row_list():
if c <= r:
R[r].append(c)
inf = len(R) # nothing will be this large
parent = [inf]*M.rows
virtual = [inf]*M.rows
for r in range(M.rows):
for c in R[r][:-1]:
while virtual[c] < r:
t = virtual[c]
virtual[c] = r
c = t
if virtual[c] == inf:
parent[c] = virtual[c] = r
return R, parent
def _row_structure_symbolic_cholesky(M):
"""Symbolic cholesky factorization, for pre-determination of the
non-zero structure of the Cholesky factororization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.row_structure_symbolic_cholesky()
[[0], [], [0], [1, 2]]
References
==========
.. [1] Symbolic Sparse Cholesky Factorization using Elimination Trees,
<NAME> (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
"""
R, parent = M.liupc()
inf = len(R) # this acts as infinity
Lrow = copy.deepcopy(R)
for k in range(M.rows):
for j in R[k]:
while j != inf and j != k:
Lrow[k].append(j)
j = parent[j]
Lrow[k] = list(sorted(set(Lrow[k])))
return Lrow
def _cholesky(M, hermitian=True):
"""Returns the Cholesky-type decomposition L of a matrix A
such that L * L.H == A if hermitian flag is True,
or L * L.T == A if hermitian is False.
A must be a Hermitian positive-definite matrix if hermitian is True,
or a symmetric matrix if it is False.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T
Matrix([
[25, 15, -5],
[15, 18, 0],
[-5, 0, 11]])
The matrix can have complex entries:
>>> from sympy import I
>>> A = Matrix(((9, 3*I), (-3*I, 5)))
>>> A.cholesky()
Matrix([
[ 3, 0],
[-I, 2]])
>>> A.cholesky() * A.cholesky().H
Matrix([
[ 9, 3*I],
[-3*I, 5]])
Non-hermitian Cholesky-type decomposition may be useful when the
matrix is not positive-definite.
>>> A = Matrix([[1, 2], [2, 1]])
>>> L = A.cholesky(hermitian=False)
>>> L
Matrix([
[1, 0],
[2, sqrt(3)*I]])
>>> L*L.T == A
True
See Also
========
sympy.matrices.dense.DenseMatrix.LDLdecomposition
sympy.matrices.matrices.MatrixBase.LUdecomposition
QRdecomposition
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if hermitian and not M.is_hermitian:
raise ValueError("Matrix must be Hermitian.")
if not hermitian and not M.is_symmetric():
raise ValueError("Matrix must be symmetric.")
L = MutableDenseMatrix.zeros(M.rows, M.rows)
if hermitian:
for i in range(M.rows):
for j in range(i):
L[i, j] = ((1 / L[j, j])*(M[i, j] -
sum(L[i, k]*L[j, k].conjugate() for k in range(j))))
Lii2 = (M[i, i] -
sum(L[i, k]*L[i, k].conjugate() for k in range(i)))
if Lii2.is_positive is False:
raise NonPositiveDefiniteMatrixError(
"Matrix must be positive-definite")
L[i, i] = sqrt(Lii2)
else:
for i in range(M.rows):
for j in range(i):
L[i, j] = ((1 / L[j, j])*(M[i, j] -
sum(L[i, k]*L[j, k] for k in range(j))))
L[i, i] = sqrt(M[i, i] -
sum(L[i, k]**2 for k in range(i)))
return M._new(L)
def _cholesky_sparse(M, hermitian=True):
"""
Returns the Cholesky decomposition L of a matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25,15,-5),(15,18,0),(-5,0,11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T == A
True
The matrix can have complex entries:
>>> from sympy import I
>>> A = SparseMatrix(((9, 3*I), (-3*I, 5)))
>>> A.cholesky()
Matrix([
[ 3, 0],
[-I, 2]])
>>> A.cholesky() * A.cholesky().H
Matrix([
[ 9, 3*I],
[-3*I, 5]])
Non-hermitian Cholesky-type decomposition may be useful when the
matrix is not positive-definite.
>>> A = SparseMatrix([[1, 2], [2, 1]])
>>> L = A.cholesky(hermitian=False)
>>> L
Matrix([
[1, 0],
[2, sqrt(3)*I]])
>>> L*L.T == A
True
See Also
========
sympy.matrices.sparse.SparseMatrix.LDLdecomposition
sympy.matrices.matrices.MatrixBase.LUdecomposition
QRdecomposition
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if hermitian and not M.is_hermitian:
raise ValueError("Matrix must be Hermitian.")
if not hermitian and not M.is_symmetric():
raise ValueError("Matrix must be symmetric.")
dps = _get_intermediate_simp(expand_mul, expand_mul)
Crowstruc = M.row_structure_symbolic_cholesky()
C = MutableDenseMatrix.zeros(M.rows)
for i in range(len(Crowstruc)):
for j in Crowstruc[i]:
if i != j:
C[i, j] = M[i, j]
summ = 0
for p1 in Crowstruc[i]:
if p1 < j:
for p2 in Crowstruc[j]:
if p2 < j:
if p1 == p2:
if hermitian:
summ += C[i, p1]*C[j, p1].conjugate()
else:
summ += C[i, p1]*C[j, p1]
else:
break
else:
break
C[i, j] = dps((C[i, j] - summ) / C[j, j])
else: # i == j
C[j, j] = M[j, j]
summ = 0
for k in Crowstruc[j]:
if k < j:
if hermitian:
summ += C[j, k]*C[j, k].conjugate()
else:
summ += C[j, k]**2
else:
break
Cjj2 = dps(C[j, j] - summ)
if hermitian and Cjj2.is_positive is False:
raise NonPositiveDefiniteMatrixError(
"Matrix must be positive-definite")
C[j, j] = sqrt(Cjj2)
return M._new(C)
def _LDLdecomposition(M, hermitian=True):
"""Returns the LDL Decomposition (L, D) of matrix A,
such that L * D * L.H == A if hermitian flag is True, or
L * D * L.T == A if hermitian is False.
This method eliminates the use of square root.
Further this ensures that all the diagonal entries of L are 1.
A must be a Hermitian positive-definite matrix if hermitian is True,
or a symmetric matrix otherwise.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T * A.inv() == eye(A.rows)
True
The matrix can have complex entries:
>>> from sympy import I
>>> A = Matrix(((9, 3*I), (-3*I, 5)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0],
[-I/3, 1]])
>>> D
Matrix([
[9, 0],
[0, 4]])
>>> L*D*L.H == A
True
See Also
========
sympy.matrices.dense.DenseMatrix.cholesky
sympy.matrices.matrices.MatrixBase.LUdecomposition
QRdecomposition
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if hermitian and not M.is_hermitian:
raise ValueError("Matrix must be Hermitian.")
if not hermitian and not M.is_symmetric():
raise ValueError("Matrix must be symmetric.")
D = MutableDenseMatrix.zeros(M.rows, M.rows)
L = MutableDenseMatrix.eye(M.rows)
if hermitian:
for i in range(M.rows):
for j in range(i):
L[i, j] = (1 / D[j, j])*(M[i, j] - sum(
L[i, k]*L[j, k].conjugate()*D[k, k] for k in range(j)))
D[i, i] = (M[i, i] -
sum(L[i, k]*L[i, k].conjugate()*D[k, k] for k in range(i)))
if D[i, i].is_positive is False:
raise NonPositiveDefiniteMatrixError(
"Matrix must be positive-definite")
else:
for i in range(M.rows):
for j in range(i):
L[i, j] = (1 / D[j, j])*(M[i, j] - sum(
L[i, k]*L[j, k]*D[k, k] for k in range(j)))
D[i, i] = M[i, i] - sum(L[i, k]**2*D[k, k] for k in range(i))
return M._new(L), M._new(D)
def _LDLdecomposition_sparse(M, hermitian=True):
"""
Returns the LDL Decomposition (matrices ``L`` and ``D``) of matrix
``A``, such that ``L * D * L.T == A``. ``A`` must be a square,
symmetric, positive-definite and non-singular.
This method eliminates the use of square root and ensures that all
the diagonal entries of L are 1.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T == A
True
"""
from .dense import MutableDenseMatrix
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if hermitian and not M.is_hermitian:
raise ValueError("Matrix must be Hermitian.")
if not hermitian and not M.is_symmetric():
raise ValueError("Matrix must be symmetric.")
dps = _get_intermediate_simp(expand_mul, expand_mul)
Lrowstruc = M.row_structure_symbolic_cholesky()
L = MutableDenseMatrix.eye(M.rows)
D = MutableDenseMatrix.zeros(M.rows, M.cols)
for i in range(len(Lrowstruc)):
for j in Lrowstruc[i]:
if i != j:
L[i, j] = M[i, j]
summ = 0
for p1 in Lrowstruc[i]:
if p1 < j:
for p2 in Lrowstruc[j]:
if p2 < j:
if p1 == p2:
if hermitian:
summ += L[i, p1]*L[j, p1].conjugate()*D[p1, p1]
else:
summ += L[i, p1]*L[j, p1]*D[p1, p1]
else:
break
else:
break
L[i, j] = dps((L[i, j] - summ) / D[j, j])
else: # i == j
D[i, i] = M[i, i]
summ = 0
for k in Lrowstruc[i]:
if k < i:
if hermitian:
summ += L[i, k]*L[i, k].conjugate()*D[k, k]
else:
summ += L[i, k]**2*D[k, k]
else:
break
D[i, i] = dps(D[i, i] - summ)
if hermitian and D[i, i].is_positive is False:
raise NonPositiveDefiniteMatrixError(
"Matrix must be positive-definite")
return M._new(L), M._new(D)
def _LUdecomposition(M, iszerofunc=_iszero, simpfunc=None, rankcheck=False):
"""Returns (L, U, perm) where L is a lower triangular matrix with unit
diagonal, U is an upper triangular matrix, and perm is a list of row
swap index pairs. If A is the original matrix, then
``A = (L*U).permuteBkwd(perm)``, and the row permutation matrix P such
that $P A = L U$ can be computed by ``P = eye(A.rows).permuteFwd(perm)``.
See documentation for LUCombined for details about the keyword argument
rankcheck, iszerofunc, and simpfunc.
Parameters
==========
rankcheck : bool, optional
Determines if this function should detect the rank
        deficiency of the matrix and should raise a
``ValueError``.
iszerofunc : function, optional
A function which determines if a given expression is zero.
The function should be a callable that takes a single
sympy expression and returns a 3-valued boolean value
``True``, ``False``, or ``None``.
It is internally used by the pivot searching algorithm.
        See the notes section for more information about the
pivot searching algorithm.
simpfunc : function or None, optional
A function that simplifies the input.
If this is specified as a function, this function should be
a callable that takes a single sympy expression and returns
        another sympy expression that is algebraically
equivalent.
If ``None``, it indicates that the pivot search algorithm
should not attempt to simplify any candidate pivots.
It is internally used by the pivot searching algorithm.
        See the notes section for more information about the
pivot searching algorithm.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[4, 3], [6, 3]])
>>> L, U, _ = a.LUdecomposition()
>>> L
Matrix([
[ 1, 0],
[3/2, 1]])
>>> U
Matrix([
[4, 3],
[0, -3/2]])
See Also
========
sympy.matrices.dense.DenseMatrix.cholesky
sympy.matrices.dense.DenseMatrix.LDLdecomposition
QRdecomposition
LUdecomposition_Simple
LUdecompositionFF
LUsolve
"""
combined, p = M.LUdecomposition_Simple(iszerofunc=iszerofunc,
simpfunc=simpfunc, rankcheck=rankcheck)
# L is lower triangular ``M.rows x M.rows``
# U is upper triangular ``M.rows x M.cols``
# L has unit diagonal. For each column in combined, the subcolumn
# below the diagonal of combined is shared by L.
# If L has more columns than combined, then the remaining subcolumns
# below the diagonal of L are zero.
# The upper triangular portion of L and combined are equal.
def entry_L(i, j):
if i < j:
# Super diagonal entry
return M.zero
elif i == j:
return M.one
elif j < combined.cols:
return combined[i, j]
# Subdiagonal entry of L with no corresponding
# entry in combined
return M.zero
def entry_U(i, j):
return M.zero if i > j else combined[i, j]
L = M._new(combined.rows, combined.rows, entry_L)
U = M._new(combined.rows, combined.cols, entry_U)
return L, U, p
def _LUdecomposition_Simple(M, iszerofunc=_iszero, simpfunc=None,
rankcheck=False):
r"""Compute the PLU decomposition of the matrix.
Parameters
==========
rankcheck : bool, optional
Determines if this function should detect the rank
        deficiency of the matrix and should raise a
``ValueError``.
iszerofunc : function, optional
A function which determines if a given expression is zero.
The function should be a callable that takes a single
sympy expression and returns a 3-valued boolean value
``True``, ``False``, or ``None``.
It is internally used by the pivot searching algorithm.
        See the notes section for more information about the
pivot searching algorithm.
simpfunc : function or None, optional
A function that simplifies the input.
If this is specified as a function, this function should be
a callable that takes a single sympy expression and returns
        another sympy expression that is algebraically
equivalent.
If ``None``, it indicates that the pivot search algorithm
should not attempt to simplify any candidate pivots.
It is internally used by the pivot searching algorithm.
        See the notes section for more information about the
pivot searching algorithm.
Returns
=======
(lu, row_swaps) : (Matrix, list)
If the original matrix is a $m, n$ matrix:
*lu* is a $m, n$ matrix, which contains result of the
decomposition in a compresed form. See the notes section
to see how the matrix is compressed.
*row_swaps* is a $m$-element list where each element is a
pair of row exchange indices.
``A = (L*U).permute_backward(perm)``, and the row
permutation matrix $P$ from the formula $P A = L U$ can be
computed by ``P=eye(A.row).permute_forward(perm)``.
Raises
======
ValueError
Raised if ``rankcheck=True`` and the matrix is found to
be rank deficient during the computation.
Notes
=====
About the PLU decomposition:
PLU decomposition is a generalization of a LU decomposition
which can be extended for rank-deficient matrices.
It can further be generalized for non-square matrices, and this
is the notation that SymPy is using.
PLU decomposition is a decomposition of a $m, n$ matrix $A$ in
the form of $P A = L U$ where
* $L$ is a $m, m$ lower triangular matrix with unit diagonal
entries.
* $U$ is a $m, n$ upper triangular matrix.
* $P$ is a $m, m$ permutation matrix.
So, for a square matrix, the decomposition would look like:
.. math::
L = \begin{bmatrix}
1 & 0 & 0 & \cdots & 0 \\
L_{1, 0} & 1 & 0 & \cdots & 0 \\
L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & 1
\end{bmatrix}
.. math::
U = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
0 & 0 & U_{2, 2} & \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & U_{n-1, n-1}
\end{bmatrix}
And for a matrix with more rows than the columns,
the decomposition would look like:
.. math::
L = \begin{bmatrix}
1 & 0 & 0 & \cdots & 0 & 0 & \cdots & 0 \\
L_{1, 0} & 1 & 0 & \cdots & 0 & 0 & \cdots & 0 \\
L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \ddots
& \vdots \\
L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & 1 & 0
& \cdots & 0 \\
L_{n, 0} & L_{n, 1} & L_{n, 2} & \cdots & L_{n, n-1} & 1
& \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots
& \ddots & \vdots \\
L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & L_{m-1, n-1}
& 0 & \cdots & 1 \\
\end{bmatrix}
.. math::
U = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
0 & 0 & U_{2, 2} & \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & U_{n-1, n-1} \\
0 & 0 & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 0
\end{bmatrix}
Finally, for a matrix with more columns than the rows, the
decomposition would look like:
.. math::
L = \begin{bmatrix}
1 & 0 & 0 & \cdots & 0 \\
L_{1, 0} & 1 & 0 & \cdots & 0 \\
L_{2, 0} & L_{2, 1} & 1 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & 1
\end{bmatrix}
.. math::
U = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, m-1}
& \cdots & U_{0, n-1} \\
0 & U_{1, 1} & U_{1, 2} & \cdots & U_{1, m-1}
& \cdots & U_{1, n-1} \\
0 & 0 & U_{2, 2} & \cdots & U_{2, m-1}
& \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots
& \cdots & \vdots \\
0 & 0 & 0 & \cdots & U_{m-1, m-1}
& \cdots & U_{m-1, n-1} \\
\end{bmatrix}
About the compressed LU storage:
The results of the decomposition are often stored in compressed
forms rather than returning $L$ and $U$ matrices individually.
    It may be less intuitive, but it is commonly used by a lot of
    numeric libraries because of its efficiency.
The storage matrix is defined as following for this specific
method:
* The subdiagonal elements of $L$ are stored in the subdiagonal
portion of $LU$, that is $LU_{i, j} = L_{i, j}$ whenever
$i > j$.
* The elements on the diagonal of $L$ are all 1, and are not
explicitly stored.
* $U$ is stored in the upper triangular portion of $LU$, that is
$LU_{i, j} = U_{i, j}$ whenever $i <= j$.
* For a case of $m > n$, the right side of the $L$ matrix is
trivial to store.
* For a case of $m < n$, the below side of the $U$ matrix is
trivial to store.
So, for a square matrix, the compressed output matrix would be:
.. math::
LU = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots & U_{n-1, n-1}
\end{bmatrix}
For a matrix with more rows than the columns, the compressed
output matrix would be:
.. math::
LU = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, n-1} \\
L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, n-1} \\
L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{n-1, 0} & L_{n-1, 1} & L_{n-1, 2} & \cdots
& U_{n-1, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots
& L_{m-1, n-1} \\
\end{bmatrix}
For a matrix with more columns than the rows, the compressed
output matrix would be:
.. math::
LU = \begin{bmatrix}
U_{0, 0} & U_{0, 1} & U_{0, 2} & \cdots & U_{0, m-1}
& \cdots & U_{0, n-1} \\
L_{1, 0} & U_{1, 1} & U_{1, 2} & \cdots & U_{1, m-1}
& \cdots & U_{1, n-1} \\
L_{2, 0} & L_{2, 1} & U_{2, 2} & \cdots & U_{2, m-1}
& \cdots & U_{2, n-1} \\
\vdots & \vdots & \vdots & \ddots & \vdots
& \cdots & \vdots \\
L_{m-1, 0} & L_{m-1, 1} & L_{m-1, 2} & \cdots & U_{m-1, m-1}
& \cdots & U_{m-1, n-1} \\
\end{bmatrix}
About the pivot searching algorithm:
When a matrix contains symbolic entries, the pivot search algorithm
differs from the case where every entry can be categorized as zero or
nonzero.
The algorithm searches column by column through the submatrix whose
top left entry coincides with the pivot position.
If it exists, the pivot is the first entry in the current search
column that iszerofunc guarantees is nonzero.
If no such candidate exists, then each candidate pivot is simplified
if simpfunc is not None.
The search is repeated, with the difference that a candidate may be
the pivot if ``iszerofunc()`` cannot guarantee that it is nonzero.
In the second search the pivot is the first candidate that
iszerofunc can guarantee is nonzero.
If no such candidate exists, then the pivot is the first candidate
for which iszerofunc returns None.
If no such candidate exists, then the search is repeated in the next
column to the right.
The pivot search algorithm differs from the one in ``rref()``, which
relies on ``_find_reasonable_pivot()``.
Future versions of ``LUdecomposition_simple()`` may use
``_find_reasonable_pivot()``.
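As a rough sketch of the search described above (a hypothetical,
simplified stand-in for ``_find_reasonable_pivot_naive()``; ``column``
is assumed here to be a list of candidate pivots):
def pick_pivot(column, iszerofunc, simpfunc):
    # First pass: the pivot is the first entry that iszerofunc
    # guarantees is nonzero (iszerofunc returns False).
    for offset, value in enumerate(column):
        if iszerofunc(value) is False:
            return offset, value
    # Second pass: simplify each candidate, again preferring an entry
    # guaranteed nonzero, and otherwise falling back to the first entry
    # for which iszerofunc returns None (zeroness undecided).
    undecided = None
    for offset, value in enumerate(column):
        if simpfunc is not None:
            value = simpfunc(value)
        if iszerofunc(value) is False:
            return offset, value
        if undecided is None and iszerofunc(value) is None:
            undecided = (offset, value)
    return undecided  # None here means every candidate is zero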
See Also
========
sympy.matrices.matrices.MatrixBase.LUdecomposition
LUdecompositionFF
LUsolve
"""
if rankcheck:
# https://github.com/sympy/sympy/issues/9796
pass
if S.Zero in M.shape:
# Define LU decomposition of a matrix with no entries as a matrix
# of the same dimensions with all zero entries.
return M.zeros(M.rows, M.cols), []
dps = _get_intermediate_simp()
lu = M.as_mutable()
row_swaps = []
pivot_col = 0
for pivot_row in range(0, lu.rows - 1):
# Search for pivot. Prefer an entry that iszerofunc determines
# is nonzero, over an entry whose zeroness iszerofunc cannot
# determine.
# XXX ``_find_reasonable_pivot`` uses slow zero testing. Blocked by bug #10279
# Future versions of LUdecomposition_simple can pass iszerofunc and simpfunc
# to _find_reasonable_pivot().
# In pass 3 of _find_reasonable_pivot(), the predicate in ``if x.equals(S.Zero):``
# calls sympy.simplify(), and not the simplification function passed in via
# the keyword argument simpfunc.
iszeropivot = True
while pivot_col != M.cols and iszeropivot:
sub_col = (lu[r, pivot_col] for r in range(pivot_row, M.rows))
pivot_row_offset, pivot_value, is_assumed_non_zero, ind_simplified_pairs =\
_find_reasonable_pivot_naive(sub_col, iszerofunc, simpfunc)
iszeropivot = pivot_value is None
if iszeropivot:
# All candidate pivots in this column are zero.
# Proceed to next column.
pivot_col += 1
if rankcheck and pivot_col != pivot_row:
# All entries including and below the pivot position are
# zero, which indicates that the rank of the matrix is
# strictly less than min(num rows, num cols)
# Mimic behavior of previous implementation, by throwing a
# ValueError.
raise ValueError("Rank of matrix is strictly less than"
" number of rows or columns."
" Pass keyword argument"
" rankcheck=False to compute"
" the LU decomposition of this matrix.")
candidate_pivot_row = None if pivot_row_offset is None else pivot_row + pivot_row_offset
if candidate_pivot_row is None and iszeropivot:
# If candidate_pivot_row is None and iszeropivot is True
# after pivot search has completed, then the submatrix
# below and to the right of (pivot_row, pivot_col) is
# all zeros, indicating that Gaussian elimination is
# complete.
return lu, row_swaps
# Update entries simplified during pivot search.
for offset, val in ind_simplified_pairs:
lu[pivot_row + offset, pivot_col] = val
if pivot_row != candidate_pivot_row:
# Row swap book keeping:
# Record which rows were swapped.
# Update stored portion of L factor by multiplying L on the
# left and right with the current permutation.
# Swap rows of U.
row_swaps.append([pivot_row, candidate_pivot_row])
# Update L.
lu[pivot_row, 0:pivot_row], lu[candidate_pivot_row, 0:pivot_row] = \
lu[candidate_pivot_row, 0:pivot_row], lu[pivot_row, 0:pivot_row]
# Swap pivot row of U with candidate pivot row.
lu[pivot_row, pivot_col:lu.cols], lu[candidate_pivot_row, pivot_col:lu.cols] = \
lu[candidate_pivot_row, pivot_col:lu.cols], lu[pivot_row, pivot_col:lu.cols]
# Introduce zeros below the pivot by adding a multiple of the
# pivot row to a row under it, and store the result in the
# row under it.
# Only entries in the target row whose index is greater than
# start_col may be nonzero.
start_col = pivot_col + 1
for row in range(pivot_row + 1, lu.rows):
# Store factors of L in the subcolumn below
# (pivot_row, pivot_row).
lu[row, pivot_row] = \
dps(lu[row, pivot_col]/lu[pivot_row, pivot_col])
# Form the linear combination of the pivot row and the current
# row below the pivot row that zeros the entries below the pivot.
# Employing slicing instead of a loop here raises
# NotImplementedError: Cannot add Zero to MutableSparseMatrix
# in sympy/matrices/tests/test_sparse.py.
# c = pivot_row + 1 if pivot_row == pivot_col else pivot_col
for c in range(start_col, lu.cols):
lu[row, c] = dps(lu[row, c] - lu[row, pivot_row]*lu[pivot_row, c])
if pivot_row != pivot_col:
# matrix rank < min(num rows, num cols),
# so factors of L are not stored directly below the pivot.
# These entries are zero by construction, so don't bother
# computing them.
for row in range(pivot_row + 1, lu.rows):
lu[row, pivot_col] = M.zero
pivot_col += 1
if pivot_col == lu.cols:
# All candidate pivots are zero implies that Gaussian
# elimination is complete.
return lu, row_swaps
if rankcheck:
if iszerofunc(
lu[Min(lu.rows, lu.cols) - 1, Min(lu.rows, lu.cols) - 1]):
raise ValueError("Rank of matrix is strictly less than"
" number of rows or columns."
" Pass keyword argument"
" rankcheck=False to compute"
" the LU decomposition of this matrix.")
return lu, row_swaps
def _LUdecompositionFF(M):
"""Compute a fraction-free LU decomposition.
Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
If the elements of the matrix belong to some integral domain I, then all
elements of L, D and U are guaranteed to belong to I.
See Also
========
sympy.matrices.matrices.MatrixBase.LUdecomposition
LUdecomposition_Simple
LUsolve
References
==========
.. [1] <NAME> & <NAME>, "Fraction-free matrix factors: new forms
for LU and QR factors". Frontiers in Computer Science in China,
Vol 2, no. 1, pp. 67-80, 2008.
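A minimal usage sketch (the matrix below is arbitrary and chosen only
for illustration):
from sympy import Matrix
A = Matrix([[1, 2], [3, 4]])
P, L, D, U = A.LUdecompositionFF()
# P*A and L*D**-1*U agree, and every intermediate entry stays integral.
assert (P*A - L*D.inv()*U).is_zero_matrix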
"""
from sympy.matrices import SparseMatrix
zeros = SparseMatrix.zeros
eye = SparseMatrix.eye
n, m = M.rows, M.cols
U, L, P = M.as_mutable(), eye(n), eye(n)
DD = zeros(n, n)
oldpivot = 1
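# Bareiss-style (fraction-free) elimination: every update below divides
# exactly by the previous pivot, so entries drawn from an integral
# domain stay in that domain.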
for k in range(n - 1):
if U[k, k] == 0:
for kpivot in range(k + 1, n):
if U[kpivot, k]:
break
else:
raise ValueError("Matrix is not full rank")
U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
L [k, k] = Ukk = U[k, k]
DD[k, k] = oldpivot * Ukk
for i in range(k + 1, n):
L[i, k] = Uik = U[i, k]
for j in range(k + 1, m):
U[i, j] = (Ukk * U[i, j] - U[k, j] * Uik) / oldpivot
U[i, k] = 0
oldpivot = Ukk
DD[n - 1, n - 1] = oldpivot
return P, L, DD, U
def _singular_value_decomposition(A):
r"""Returns a Condensed Singular Value decomposition.
Explanation
===========
A Singular Value decomposition is a decomposition in the form $A = U \Sigma V$
where
- $U, V$ are column orthogonal matrices.
- $\Sigma$ is a diagonal matrix, where the main diagonal contains singular
values of matrix A.
A column orthogonal matrix satisfies
$\mathbb{I} = U^H U$, while a full orthogonal matrix satisfies
the relation $\mathbb{I} = U U^H = U^H U$, where $\mathbb{I}$ is an identity
matrix with matching dimensions.
For matrices which are not square or are rank-deficient, it is
sufficient to return a column orthogonal matrix because augmenting
them may introduce redundant computations.
For this reason, the condensed singular value decomposition returns
only column orthogonal matrices.
If you want to augment the results to return a full orthogonal
decomposition, you should use the following procedures.
- Augment the $U, V$ matrices with columns that are orthogonal to every
other column, making them square.
- Augment the $\Sigma$ matrix with zero rows to make it have the same
shape as the original matrix.
The procedure will be illustrated in the examples section.
Examples
========
We take a full rank matrix first:
>>> from sympy import Matrix
>>> A = Matrix([[1, 2],[2,1]])
>>> U, S, V = A.singular_value_decomposition()
>>> U
Matrix([
[ sqrt(2)/2, sqrt(2)/2],
[-sqrt(2)/2, sqrt(2)/2]])
>>> S
Matrix([
[1, 0],
[0, 3]])
>>> V
Matrix([
[-sqrt(2)/2, sqrt(2)/2],
[ sqrt(2)/2, sqrt(2)/2]])
If a matrix is square and has full rank, both U and V
are orthogonal in both directions:
>>> U * U.H
Matrix([
[1, 0],
[0, 1]])
>>> U.H * U
Matrix([
[1, 0],
[0, 1]])
>>> V * V.H
Matrix([
[1, 0],
[0, 1]])
>>> V.H * V
Matrix([
[1, 0],
[0, 1]])
>>> A == U * S * V.H
True
>>> C = Matrix([
... [1, 0, 0, 0, 2],
... [0, 0, 3, 0, 0],
... [0, 0, 0, 0, 0],
... [0, 2, 0, 0, 0],
... ])
>>> U, S, V = C.singular_value_decomposition()
>>> V.H * V
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> V * V.H
Matrix([
[1/5, 0, 0, 0, 2/5],
[ 0, 1, 0, 0, 0],
[ 0, 0, 1, 0, 0],
[ 0, 0, 0, 0, 0],
[2/5, 0, 0, 0, 4/5]])
If you want to augment the results to form a full orthogonal
decomposition, you should augment $V$ with additional orthogonal
columns.
You can append arbitrary standard basis vectors that are linearly
independent of the other columns and run the Gram-Schmidt process
to turn the augmented set into an orthogonal basis.
>>> V_aug = V.row_join(Matrix([[0,0,0,0,1],
... [0,0,0,1,0]]).H)
>>> V_aug = V_aug.QRdecomposition()[0]
>>> V_aug
Matrix([
[0, sqrt(5)/5, 0, -2*sqrt(5)/5, 0],
[1, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 1],
[0, 2*sqrt(5)/5, 0, sqrt(5)/5, 0]])
>>> V_aug.H * V_aug
Matrix([
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]])
>>> V_aug * V_aug.H
Matrix([
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]])
Similarly, we augment U:
>>> U_aug = U.row_join(Matrix([0,0,1,0]))
>>> U_aug = U_aug.QRdecomposition()[0]
>>> U_aug
Matrix([
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[1, 0, 0, 0]])
>>> U_aug.H * U_aug
Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
>>> U_aug * U_aug.H
Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
We add two zero columns and one zero row to S:
>>> S_aug = S.col_join(Matrix([[0,0,0]]))
>>> S_aug = S_aug.row_join(Matrix([[0,0,0,0],
... [0,0,0,0]]).H)
>>> S_aug
Matrix([
[2, 0, 0, 0, 0],
[0, sqrt(5), 0, 0, 0],
[0, 0, 3, 0, 0],
[0, 0, 0, 0, 0]])
>>> U_aug * S_aug * V_aug.H == C
True
"""
AH = A.H
m, n = A.shape
if m >= n:
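# When m >= n, eigen-decompose the smaller Gram matrix A^H A (n x n):
# its eigenvectors give V and its eigenvalues are the squared singular
# values.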
V, S = (AH * A).diagonalize()
ranked = []
for i, x in enumerate(S.diagonal()):
if not x.is_zero:
ranked.append(i)
V = V[:, ranked]
Singular_vals = [sqrt(S[i, i]) for i in range(S.rows) if i in ranked]
S = S.zeros(len(Singular_vals))
for i in range(len(Singular_vals)):
S[i, i] = Singular_vals[i]
V, _ = V.QRdecomposition()
U = A * V * S.inv()
else:
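# When m < n, work with A A^H (m x m) instead: its eigenvectors give U,
# and V is recovered afterwards as A.H * U * S.inv().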
U, S = (A * AH).diagonalize()
ranked = []
for i, x in enumerate(S.diagonal()):
if not x.is_zero:
ranked.append(i)
U = U[:, ranked]
Singular_vals = [sqrt(S[i, i]) for i in range(S.rows) if i in ranked]
S = S.zeros(len(Singular_vals))
for i in range(len(Singular_vals)):
S[i, i] = Singular_vals[i]
U, _ = U.QRdecomposition()
V = AH * U * S.inv()
return U, S, V
def _QRdecomposition_optional(M, normalize=True):
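# Gram-Schmidt orthogonalization of the columns of M. Columns that reduce
# to zero (rank deficiency) are skipped; ``ranked`` records the indices of
# the columns that are kept, so Q may have fewer columns than M.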
def dot(u, v):
return u.dot(v, hermitian=True)
dps = _get_intermediate_simp(expand_mul, expand_mul)
A = M.as_mutable()
ranked = list()
Q = A
R = A.zeros(A.cols)
for j in range(A.cols):
for i in range(j):
if Q[:, i].is_zero_matrix:
continue
R[i, j] = dot(Q[:, i], Q[:, j]) / dot(Q[:, i], Q[:, i])
R[i, j] = dps(R[i, j])
Q[:, j] -= Q[:, i] * R[i, j]
Q[:, j] = dps(Q[:, j])
if Q[:, j].is_zero_matrix is False:
ranked.append(j)
R[j, j] = M.one
Q = Q.extract(range(Q.rows), ranked)
R = R.extract(ranked, range(R.cols))
if normalize:
# Normalization
for i in range(Q.cols):
norm = Q[:, i].norm()
Q[:, i] /= norm
R[i, :] *= norm
return M.__class__(Q), M.__class__(R)
def _QRdecomposition(M):
r"""Returns a QR decomposition.
Explanation
===========
A QR decomposition is a decomposition in the form $A = Q R$
where
- $Q$ is a column orthogonal matrix.
- $R$ is an upper triangular (trapezoidal) matrix.
A column orthogonal matrix satisfies
$\mathbb{I} = Q^H Q$, while a full orthogonal matrix satisfies
the relation $\mathbb{I} = Q Q^H = Q^H Q$, where $\mathbb{I}$ is an identity
matrix with matching dimensions.
For matrices which are not square or are rank-deficient, it is
sufficient to return a column orthogonal matrix because augmenting
them may introduce redundant computations.
Another advantage of this is that you can easily inspect the rank
of the matrix by counting the number of columns of $Q$.
If you want to augment the results to return a full orthogonal
decomposition, you should use the following procedures.
- Augment the $Q$ matrix with columns that are orthogonal to every
other column, making it square.
- Augment the $R$ matrix with zero rows to make it have the same
shape as the original matrix.
The procedure will be illustrated in the examples section.
Examples
========
A full rank matrix example:
>>> from sympy import Matrix
>>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[ 6/7, -69/175, -58/175],
[ 3/7, 158/175, 6/175],
[-2/7, 6/35, -33/35]])
>>> R
Matrix([
[14, 21, -14],
[ 0, 175, -70],
[ 0, 0, 35]])
If the matrix is square and full rank, the $Q$ matrix becomes
orthogonal in both directions, and needs no augmentation.
>>> Q * Q.H
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> Q.H * Q
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> A == Q*R
True
A rank deficient matrix example:
>>> A = Matrix([[12, -51, 0], [6, 167, 0], [-4, 24, 0]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[ 6/7, -69/175],
[ 3/7, 158/175],
[-2/7, 6/35]])
>>> R
Matrix([
[14, 21, 0],
[ 0, 175, 0]])
``QRdecomposition`` might return a matrix $Q$ that is rectangular.
In this case the orthogonality condition is satisfied as
$\mathbb{I} = Q.H*Q$ but not for the reversed product
$\mathbb{I} = Q * Q.H$.
>>> Q.H * Q
Matrix([
[1, 0],
[0, 1]])
>>> Q * Q.H
Matrix([
[27261/30625, 348/30625, -1914/6125],
[ 348/30625, 30589/30625, 198/6125],
[ -1914/6125, 198/6125, 136/1225]])
If you want to augment the results to form a full orthogonal
decomposition, you should augment $Q$ with additional orthogonal
columns.
You can append arbitrary standard basis vectors that are linearly
independent of the other columns and run the Gram-Schmidt process
to turn the augmented set into an orthogonal basis.
>>> Q_aug = Q.row_join(Matrix([0, 0, 1]))
>>> Q_aug = Q_aug.QRdecomposition()[0]
>>> Q_aug
Matrix([
[ 6/7, -69/175, 58/175],
[ 3/7, 158/175, -6/175],
[-2/7, 6/35, 33/35]])
>>> Q_aug.H * Q_aug
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> Q_aug * Q_aug.H
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
Augmenting the $R$ matrix with a zero row is straightforward.
>>> R_aug = R.col_join(Matrix([[0, 0, 0]]))
>>> R_aug
Matrix([
[14, 21, 0],
[ 0, 175, 0],
[ 0, 0, 0]])
>>> Q_aug * R_aug == A
True
A zero matrix example:
>>> from sympy import Matrix
>>> A = Matrix.zeros(3, 4)
>>> Q, R = A.QRdecomposition()
They may return matrices with zero rows and columns.
>>> Q
Matrix(3, 0, [])
>>> R
Matrix(0, 4, [])
>>> Q*R
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
Following the same augmentation rule described above, $Q$ can be augmented
with columns of an identity matrix and $R$ can be augmented with
rows of a zero matrix.
>>> Q_aug = Q.row_join(Matrix.eye(3))
>>> R_aug = R.col_join(Matrix.zeros(3, 4))
>>> Q_aug * Q_aug.T
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> R_aug
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
>>> Q_aug * R_aug == A
True
See Also
========
sympy.matrices.dense.DenseMatrix.cholesky
sympy.matrices.dense.DenseMatrix.LDLdecomposition
sympy.matrices.matrices.MatrixBase.LUdecomposition
QRsolve
"""
return _QRdecomposition_optional(M, normalize=True)
def _upper_hessenberg_decomposition(A):
"""Converts a matrix into Hessenberg matrix H
Returns 2 matrices H, P s.t.
$P H P^{T} = A$, where H is an upper Hessenberg matrix
and P is an orthogonal matrix.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([
... [1,2,3],
... [-3,5,6],
... [4,-8,9],
... ])
>>> H, P = A.upper_hessenberg_decomposition()
>>> H
Matrix([
[1, 6/5, 17/5],
[5, 213/25, -134/25],
[0, 216/25, 137/25]])
>>> P
Matrix([
[1, 0, 0],
[0, -3/5, 4/5],
[0, 4/5, 3/5]])
>>> P * H * P.H == A
True
References
==========
.. [#] https://mathworld.wolfram.com/HessenbergDecomposition.html
"""
M = A.as_mutable()
if not M.is_square:
raise NonSquareMatrixError("Matrix must be square.")
n = M.cols
P = M.eye(n)
H = M
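# Reduce H column by column with Householder reflections built from the
# subcolumn below the first subdiagonal entry; P accumulates the product
# of the reflections, so P is orthogonal and P*H*P.H reproduces A.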
for j in range(n - 2):
u = H[j + 1:, j]
if u[1:, :].is_zero_matrix:
continue
if sign(u[0]) != 0:
u[0] = u[0] + sign(u[0]) * u.norm()
else:
u[0] = u[0] + u.norm()
v = u / u.norm()
H[j + 1:, :] = H[j + 1:, :] - 2 * v * (v.H * H[j + 1:, :])
H[:, j + 1:] = H[:, j + 1:] - (H[:, j + 1:] * (2 * v)) * v.H
P[:, j + 1:] = P[:, j + 1:] - (P[:, j + 1:] * (2 * v)) * v.H
return H, P
| null | null | null | null | null | null | null | null | null |
[] | 6,283 |
recipes/Python/578505_The_Iota_language_in_Python/recipe-578505.py
|
tdiprima/code
| 2,023 |
6592495
|
# Iota
#
# http://semarch.linguistics.fas.nyu.edu/barker/Iota/
#
# S = λx.λy.λz.xz(yz)
# K = λx.λy.x
# i = λc.cSK
#
# i, *ii, *i*ii, **ii*ii
# 0 100 10100 1100100 Iota is the encoding.
#
## (let iota ()
## (if (eq? #\* (read-char)) ((iota)(iota))
## (lambda (c) ((c (lambda (x) (lambda (y) (lambda (z) ((x z)(y z))))))
## (lambda (x) (lambda (y) x))))))
##
S = lambda x: lambda y: lambda z: x(z)(y(z))
K = lambda x: lambda y: x
i = lambda c: c(S)(K)
I = i(i)
def _decode(path):
bit, path = path[0], path[1:]
if bit == '0':
return i, path
A, path = _decode(path)
B, path = _decode(path)
return A(B), path
decode = lambda path: _decode(path)[0]
# K = *i*i*ii = 1010100
#
print K is i(i(i(i))) is decode('1010100')
# S = *i*i*i*ii = 101010100
#
print S is i(i(i(i(i)))) is decode('101010100')
# All of these return i itself.
print i is i
print i is i(i)(i)
print i is i( i ) ( i(i)(i) )
print i is i( i(i)(i) ) ( i )
print i is i( i(i)(i) ) ( i(i)(i) )
# Identity function
#
I is decode('100') # I.e. i(i)
| null | null | null | null | null | null | null | null | null |
[] | 4,646 |
recipes/Python/577859_CLOSlike_aroundbeforeafter_auxiliary/recipe-577859.py
|
tdiprima/code
| 2,023 |
87531
|
#!/usr/bin/env python
#
# Copyright (c) 2011 <NAME> (zuo). All rights reserved.
# Licensed under the MIT License.
#
# Python 2.5+/3.x-compatibile.
#
# The newest version of this module should be downloadable from:
# https://github.com/zuo/Zuo-s-Recipes-and-Drafts/blob/master/auxmethods.py
from __future__ import with_statement # (Py2.5 needs this)
from functools import wraps
from inspect import getmro, isfunction
__all__ = (
'ClassNameConflictError',
'aux', 'primary',
'AutoAuxBase', 'AutoAuxMeta',
)
#
# exceptions
class ClassNameConflictError(Exception):
"""
Conflict: class names are identical after stripping leading underscores.
"""
def __str__(self):
cls1, cls2 = self.args
return (
'Class names: %r and %r -- are identical after stripping leading '
'underscores, which is forbidden when using aux/primary methods.'
% (cls1.__name__, cls2.__name__))
#
# non-public stuff
_SUFFIXES = '_primary', '_before', '_after', '_around'
class _WrappedMethodPlaceholder(object):
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
raise TypeError('method placeholder is not callable '
'(forgot to apply aux() class decorator?)')
def _next_around(obj_around, self, basename, *args, **kwargs):
# try to get and call next `around` aux method
meth_around = getattr(obj_around, basename + '_around', None)
if meth_around is not None:
return meth_around(*args, **kwargs)
else:
# if there is no more `around` methods, get and call:
# `before` aux method (it can call superclasses' `before` methods)
meth_before = getattr(self, basename + '_before', None)
if meth_before is not None:
meth_before(*args, **kwargs)
# primary method (it can call superclasses' primary methods)
meth_primary = getattr(self, basename + '_primary')
pri_result = meth_primary(*args, **kwargs)
# `after` aux method (it can call superclasses' `after` methods)
meth_after = getattr(self, basename + '_after', None)
if meth_after is not None:
meth_after(*args, **kwargs)
return pri_result
def _provide_wrapper(cls, func, basename):
@wraps(func)
def wrapper(self, *args, **kwargs):
return _next_around(self, self, basename, *args, **kwargs)
added_doc = '(See: %s%s() signature).' % (basename, '_primary')
existing_doc = (getattr(wrapper, '__doc__', None) or '').rstrip()
if existing_doc:
wrapper.__doc__ = '%s\n\n%s' % (existing_doc, added_doc)
else:
wrapper.__doc__ = added_doc
setattr(cls, basename, wrapper)
def _provide_primary(cls, func, basename):
suffixed_name = basename + '_primary'
func.__name__ = suffixed_name
func.__doc__ = (
'The actual method implementation '
'(%s() is only a wrapper).' % basename)
setattr(cls, suffixed_name, func)
def _provide_wrapped_primary(cls, func):
basename = func.__name__
_provide_wrapper(cls, func, basename)
_provide_primary(cls, func, basename)
def _strip_and_check_cls_name(cls):
cls_stripped_name = cls.__name__.lstrip('_')
for supercls in getmro(cls):
if (supercls is not cls and
cls_stripped_name == supercls.__name__.lstrip('_')):
raise ClassNameConflictError(supercls, cls)
return cls_stripped_name
def _provide_call_next(cls, suffixed_name):
cls_stripped_name = _strip_and_check_cls_name(cls)
basename, qualifier = suffixed_name.rsplit('_', 1)
cn_name = '_%s__%s' % (
cls_stripped_name,
(basename if qualifier == 'primary' else suffixed_name))
if cn_name in vars(cls):
return
if qualifier == 'around':
def call_next(self, *args, **kwargs):
return _next_around(
super(cls, self), self, basename, *args, **kwargs)
else:
def call_next(self, *args, **kwargs):
super_meth = getattr(super(cls, self), suffixed_name, None)
if super_meth is not None:
return super_meth(*args, **kwargs)
call_next.__name__ = cn_name
setattr(cls, cn_name, call_next)
#
# actual decorators
def aux(cls):
"""Class decorator (for classes containing primary and/or aux methods)."""
if not isinstance(cls, type):
raise TypeError('%r is not a type' % cls)
# wrap/rename primary methods
for name, obj in tuple(vars(cls).items()): # (Py2.x/3.x-compatibile way)
if isinstance(obj, _WrappedMethodPlaceholder):
_provide_wrapped_primary(cls, obj.func)
# provide `call-next-method`-like methods
for name, obj in tuple(vars(cls).items()):
if isfunction(obj) and obj.__name__.endswith(_SUFFIXES):
_provide_call_next(cls, obj.__name__)
return cls
def primary(func):
"""Method decorator (for primary methods only)."""
if not isfunction(func):
raise TypeError('%r is not a function' % func)
return _WrappedMethodPlaceholder(func)
#
# convenience classes (any of them can be used *optionally*...)
class AutoAuxMeta(type):
"""Convenience metaclass: `aux()`-decorates classes created by it."""
def __new__(mcs, name, bases, attr_dict):
return aux(type.__new__(mcs, name, bases, attr_dict))
# (here: Py2.x/3.x-compatibile way to create a class with a custom metaclass)
AutoAuxBase = AutoAuxMeta('AutoAuxBase', (object,), {'__doc__':
"""`AutoAuxMeta`-created base class: `aux()`-decorates its subclasses."""})
#
# basic example
if __name__ == '__main__':
import sys
import time
class TimedAction(AutoAuxBase):
# note: AutoAuxBase automatically decorates your classes with aux()
def action_before(self, *args, **kwargs):
"""Start action timer."""
print('starting action timer...')
self.start_time = time.time()
def action_after(self, *args, **kwargs):
"""Stop action timer and report measured duration."""
self.action_duration = time.time() - self.start_time
print('action duration: %f' % self.action_duration)
class FileContentAction(AutoAuxBase):
def action_around(self, path):
"""Read file and pass its content on; report success or error."""
print('opening file %r...' % path)
try:
with open(path) as f:
content = f.read()
except EnvironmentError:
print(sys.exc_info()[1])
else:
result = self.__action_around(path, content)
print('file %r processed successfully' % path)
return result
class NewlinesCounter(FileContentAction, TimedAction):
item_descr = 'newlines'
@primary
def action(self, path, content):
"""Get number of newlines in a given string."""
return content.count('\n')
def action_before(self, path, *args):
"""Print a message and go on..."""
print('counting %s in file %r will start...' % (
self.item_descr, path))
self.__action_before(path, *args)
def action_around(self, path):
"""Start operation with given file path. Finally, show summary."""
result = self.__action_around(path)
if result is not None:
print('%s in file %r: %s\n' % (
self.item_descr, path, result))
else:
print('could not count %s in file %r\n' % (
self.item_descr, path))
return result
class SpacesAndNewlinesCounter(NewlinesCounter):
item_descr = 'spaces and newlines'
@primary
def action(self, path, content):
"""Get number of spaces and newlines in a given string."""
spaces = content.count(' ')
newlines = self.__action(path, content)
return spaces + newlines
example_file_paths = __file__, 'spam/spam/spam/non-existent'
nl_counter = NewlinesCounter()
spc_nl_counter = SpacesAndNewlinesCounter()
for path in example_file_paths:
nl_counter.action(path)
spc_nl_counter.action(path)
|
0
|
0
|
0.0
|
36
|
0
|
0
|
1.0
|
0
|
44
|
[] | 556 |
packages/grid/backend/grid/core/celery_app.py
|
vishalbelsare/PySyft
| 8,428 |
3234056
|
# future
from __future__ import annotations
# third party
from celery import Celery
# relative
from . import celery_config
from . import celery_serde # noqa: 401
# backend is required to persist tasks
celery_app = Celery(
"worker",
broker="amqp://guest@queue//",
)
celery_app.config_from_object(celery_config)
| null | null | null | null | null | null | null | null | null |
[] | 2,248 |
recipes/Python/521866_myspace_profile_parser/recipe-521866.py
|
tdiprima/code
| 2,023 |
188768
|
<reponame>tdiprima/code
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib, urllib2
from re import *
class Profile(object):
""" parse profile information from a myspace.com account """
def __init__(self, uid):
self.uid = uid
self.profile_url = 'http://profile.myspace.com/'
self.fake_browser = 'Opera/8.53 (Windows NT 5.1; U; de)'
self.site = self.__ua()
def __ua(self):
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', self.fake_browser)]
site = opener.open(self.profile_url+self.uid)
site = site.read()
if site:
return site
else:
return False
def getProfile(self):
re = compile('<meta name="description" content="MySpace Profile - ([^"]+)" /><meta', I)
profile_data = re.findall(self.site)
return profile_data[0].split(', ')
p = Profile(uid="textacrew")
print p.getProfile()
|
3
|
0
|
0.0
|
2
|
0
|
3
|
1.0
|
1
|
4
|
[
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 5,
"message": "Import only needed names or import the module and then use its members.",
"textRange": {
"endLine": 5,
"endOffset": 16,
"startLine": 5,
"startOffset": 0
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 29,
"message": "Rename method \"getProfile\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 29,
"endOffset": 18,
"startLine": 29,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 36,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 36,
"endOffset": 5,
"startLine": 36,
"startOffset": 0
},
"type": "CODE_SMELL"
}
] | 1,148 |
tests/python/relay/utils/ref_funcs.py
|
XiaoSong9905/tvm
| 4,640 |
5441692
|
<reponame>XiaoSong9905/tvm<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
def gather_nd(data_np, indices_np, batch_dims=0):
"""gather_nd implemented using numpy"""
data_shape = data_np.shape
indices_shape = indices_np.shape
def gather_nd_batch_dims_1_ref(data, indices):
res = []
for i, row in enumerate(data):
indices_tuple = tuple(indices[:, i]) # the indices for the i-th batch
res.append(row[indices_tuple])
# stack on the batch dim
return np.stack(res, 0)
if batch_dims > 1:
data_np_reshape = np.reshape(data_np, (-1,) + data_shape[batch_dims:])
indices_np_reshape = np.reshape(
indices_np, (indices_shape[0], -1) + indices_shape[(batch_dims + 1) :]
)
ref_res = gather_nd_batch_dims_1_ref(data_np_reshape, indices_np_reshape)
out_shape = indices_shape[1 : (batch_dims + 1)] + ref_res.shape[1:]
ref_res = np.reshape(ref_res, out_shape)
elif batch_dims == 1:
ref_res = gather_nd_batch_dims_1_ref(data_np, indices_np)
else:
ref_res = data_np[tuple(indices_np)]
return ref_res
|
0
|
0
|
0.0
|
5
|
0
|
0
|
1.0
|
0
|
4
|
[] | 12,088 |
tests/components/sensibo/test_update.py
|
liangleslie/core
| 30,023 |
51626
|
"""The test for the sensibo update platform."""
from __future__ import annotations
from datetime import timedelta
from unittest.mock import patch
from pysensibo.model import SensiboData
from pytest import MonkeyPatch
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.util import dt
from tests.common import async_fire_time_changed
async def test_select(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo update."""
state1 = hass.states.get("update.hallway_update_available")
state2 = hass.states.get("update.kitchen_update_available")
assert state1.state == STATE_ON
assert state1.attributes["installed_version"] == "SKY30046"
assert state1.attributes["latest_version"] == "SKY30048"
assert state1.attributes["title"] == "skyv2"
assert state2.state == STATE_OFF
monkeypatch.setattr(get_data.parsed["ABC999111"], "fw_ver", "SKY30048")
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("update.hallway_update_available")
assert state1.state == STATE_OFF
|
0
|
0
|
0.0
|
0
|
0
|
0
|
1.0
|
0
|
1
|
[] | 324 |
tests/components/cloud/test_tts.py
|
tbarbette/core
| 30,023 |
6482044
|
"""Tests for cloud tts."""
from unittest.mock import Mock
from hass_nabucasa import voice
import pytest
import voluptuous as vol
from homeassistant.components.cloud import const, tts
@pytest.fixture()
def cloud_with_prefs(cloud_prefs):
"""Return a cloud mock with prefs."""
return Mock(client=Mock(prefs=cloud_prefs))
def test_default_exists():
"""Test our default language exists."""
assert const.DEFAULT_TTS_DEFAULT_VOICE in voice.MAP_VOICE
def test_schema():
"""Test schema."""
assert "nl-NL" in tts.SUPPORT_LANGUAGES
processed = tts.PLATFORM_SCHEMA({"platform": "cloud", "language": "nl-NL"})
assert processed["gender"] == "female"
with pytest.raises(vol.Invalid):
tts.PLATFORM_SCHEMA(
{"platform": "cloud", "language": "non-existing", "gender": "female"}
)
with pytest.raises(vol.Invalid):
tts.PLATFORM_SCHEMA(
{"platform": "cloud", "language": "nl-NL", "gender": "not-supported"}
)
# Should not raise
tts.PLATFORM_SCHEMA({"platform": "cloud", "language": "nl-NL", "gender": "female"})
tts.PLATFORM_SCHEMA({"platform": "cloud"})
async def test_prefs_default_voice(hass, cloud_with_prefs, cloud_prefs):
"""Test cloud provider uses the preferences."""
assert cloud_prefs.tts_default_voice == ("en-US", "female")
provider_pref = await tts.async_get_engine(
Mock(data={const.DOMAIN: cloud_with_prefs}), None, {}
)
provider_conf = await tts.async_get_engine(
Mock(data={const.DOMAIN: cloud_with_prefs}),
{"language": "fr-FR", "gender": "female"},
None,
)
assert provider_pref.default_language == "en-US"
assert provider_pref.default_options == {"gender": "female"}
assert provider_conf.default_language == "fr-FR"
assert provider_conf.default_options == {"gender": "female"}
await cloud_prefs.async_update(tts_default_voice=("nl-NL", "male"))
await hass.async_block_till_done()
assert provider_pref.default_language == "nl-NL"
assert provider_pref.default_options == {"gender": "male"}
assert provider_conf.default_language == "fr-FR"
assert provider_conf.default_options == {"gender": "female"}
async def test_provider_properties(cloud_with_prefs):
"""Test cloud provider."""
provider = await tts.async_get_engine(
Mock(data={const.DOMAIN: cloud_with_prefs}), None, {}
)
assert provider.supported_options == ["gender"]
assert "nl-NL" in provider.supported_languages
async def test_get_tts_audio(cloud_with_prefs):
"""Test cloud provider."""
provider = await tts.async_get_engine(
Mock(data={const.DOMAIN: cloud_with_prefs}), None, {}
)
assert provider.supported_options == ["gender"]
assert "nl-NL" in provider.supported_languages
|
0
|
0
|
0.0
|
0
|
0
|
0
|
1.0
|
0
|
6
|
[] | 4,207 |
recipes/Python/576635_whois/recipe-576635.py
|
tdiprima/code
| 2,023 |
3388232
|
<gh_stars>1000+
"""whois.py
simple whois client
"""
import sys
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("whois.arin.net", 43))
s.send(sys.argv[1] + "\r\n")
response = ''
while True:
d = s.recv(4096)
response += d
if d == '':
break
s.close()
print
print response
|
2
|
0
|
0.0
|
3
|
0
|
2
|
1.0
|
0
|
2
|
[
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 19,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 19,
"endOffset": 5,
"startLine": 19,
"startOffset": 0
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 20,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 20,
"endOffset": 5,
"startLine": 20,
"startOffset": 0
},
"type": "CODE_SMELL"
}
] | 2,949 |
examples/images/network_in_network.py
|
jjpalacio/tflearn
| 10,882 |
323218
|
# -*- coding: utf-8 -*-
""" Network In Network.
Applying 'Network In Network' to CIFAR-10 classification task.
References:
Network In Network. <NAME>, <NAME> & <NAME>, 2014.
Links:
http://arxiv.org/pdf/1312.4400v3.pdf
"""
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, flatten
from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from tflearn.layers.estimator import regression
# Data loading and preprocessing
from tflearn.datasets import cifar10
(X, Y), (X_test, Y_test) = cifar10.load_data()
X, Y = shuffle(X, Y)
Y = to_categorical(Y)
Y_test = to_categorical(Y_test)
# Building 'Network In Network'
network = input_data(shape=[None, 32, 32, 3])
network = conv_2d(network, 192, 5, activation='relu')
network = conv_2d(network, 160, 1, activation='relu')
network = conv_2d(network, 96, 1, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = dropout(network, 0.5)
network = conv_2d(network, 192, 5, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = avg_pool_2d(network, 3, strides=2)
network = dropout(network, 0.5)
network = conv_2d(network, 192, 3, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = conv_2d(network, 10, 1, activation='relu')
network = avg_pool_2d(network, 8)
network = flatten(network)
network = regression(network, optimizer='adam',
loss='softmax_categorical_crossentropy',
learning_rate=0.001)
# Training
model = tflearn.DNN(network)
model.fit(X, Y, n_epoch=50, shuffle=True, validation_set=(X_test, Y_test),
show_metric=True, batch_size=128, run_id='cifar10_net_in_net')
| null | null | null | null | null | null | null | null | null |
[] | 7,920 |
infra/config/PRESUBMIT.py
|
chromium/chromium
| 14,668 |
65214
|
<filename>infra/config/PRESUBMIT.py
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enforces luci-milo.cfg consistency.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
PRESUBMIT_VERSION = '2.0.0'
USE_PYTHON3 = True
_IGNORE_FREEZE_FOOTER = 'Ignore-Freeze'
# The time module's handling of timezones is abysmal, so the boundaries are
# precomputed in UNIX time
_FREEZE_START = 1639641600 # 2021/12/16 00:00 -0800
_FREEZE_END = 1641196800 # 2022/01/03 00:00 -0800
def CheckFreeze(input_api, output_api):
if _FREEZE_START <= input_api.time.time() < _FREEZE_END:
footers = input_api.change.GitFootersFromDescription()
if _IGNORE_FREEZE_FOOTER not in footers:
def convert(t):
ts = input_api.time.localtime(t)
return input_api.time.strftime('%Y/%m/%d %H:%M %z', ts)
return [
output_api.PresubmitError(
'There is a prod freeze in effect from {} until {},'
' files in //infra/config cannot be modified'.format(
convert(_FREEZE_START), convert(_FREEZE_END)))
]
return []
def CheckTests(input_api, output_api):
glob = input_api.os_path.join(input_api.PresubmitLocalPath(), '*_test.py')
tests = input_api.canned_checks.GetUnitTests(input_api,
output_api,
input_api.glob(glob),
run_on_python2=False,
run_on_python3=True,
skip_shebang_check=True)
return input_api.RunTests(tests)
def CheckLintLuciMilo(input_api, output_api):
if ('infra/config/generated/luci/luci-milo.cfg' in input_api.LocalPaths()
or 'infra/config/lint-luci-milo.py' in input_api.LocalPaths()):
return input_api.RunTests([
input_api.Command(
name='lint-luci-milo',
cmd=[input_api.python_executable, 'lint-luci-milo.py'],
kwargs={},
message=output_api.PresubmitError),
])
return []
def CheckTestingBuildbot(input_api, output_api):
if ('infra/config/generated/luci/luci-milo.cfg' in input_api.LocalPaths() or
'infra/config/generated/luci/luci-milo-dev.cfg' in input_api.LocalPaths()
):
return input_api.RunTests([
input_api.Command(
name='testing/buildbot config checks',
cmd=[input_api.python_executable, input_api.os_path.join(
'..', '..', 'testing', 'buildbot',
'generate_buildbot_json.py',),
'--check'],
kwargs={},
message=output_api.PresubmitError),
])
return []
def CheckLucicfgGenOutputMain(input_api, output_api):
return input_api.RunTests(input_api.canned_checks.CheckLucicfgGenOutput(
input_api, output_api, 'main.star'))
def CheckLucicfgGenOutputDev(input_api, output_api):
return input_api.RunTests(input_api.canned_checks.CheckLucicfgGenOutput(
input_api, output_api, 'dev.star'))
def CheckChangedLUCIConfigs(input_api, output_api):
return input_api.canned_checks.CheckChangedLUCIConfigs(
input_api, output_api)
# Footer indicating a CL that is trying to address an outage by some mechanism
# other than those in infra/config/outages
_OUTAGE_ACTION_FOOTER = 'Infra-Config-Outage-Action'
# Footer acknowledging that an outages configuration is in effect when making an
# unrelated change
_IGNORE_OUTAGE_FOOTER = 'Infra-Config-Ignore-Outage'
def CheckOutagesConfigOnCommit(input_api, output_api):
outages_pyl = input_api.os_path.join(
input_api.PresubmitLocalPath(), 'generated/outages.pyl')
with open(outages_pyl) as f:
outages_config = input_api.ast.literal_eval(f.read())
if not outages_config:
footers = input_api.change.GitFootersFromDescription()
return [
output_api.PresubmitError(
'There is no outages configuration in effect, '
'please remove the {} footer from your CL description.'
.format(footer))
for footer in (_OUTAGE_ACTION_FOOTER, _IGNORE_OUTAGE_FOOTER)
if footer in footers
]
# Any of the config files under infra/config/outages
outages_config_files = set()
# Any of the config files under infra/config/generated
generated_config_files = set()
# Any config files that are not under infra/config/outages or
# infra/config/generated
config_files = set()
for p in input_api.LocalPaths():
if p in ('README.md', 'OWNERS'):
continue
if p.startswith('infra/config/outages/'):
outages_config_files.add(p)
continue
if p.startswith('infra/config/generated/'):
generated_config_files.add(p)
continue
config_files.add(p)
# If the only changes to non-generated config fies were the outages files,
# assume the change was addressing an outage and that no additional mechanism
# needs to be added
if outages_config_files and not config_files:
# REVIEWER: Should we prevent the footers from being here in this case?
return []
# If any non-generated, non-outages files were modified or if the generated
# config files were modified without any config files being modified (lucicfg
# change, etc.) then make sure the user knows that when the outages
# configuration is disabled, the generated configuration may change
if config_files or generated_config_files:
footers = input_api.change.GitFootersFromDescription()
has_action_footer = _OUTAGE_ACTION_FOOTER in footers
has_ignore_footer = _IGNORE_OUTAGE_FOOTER in footers
if has_action_footer and has_ignore_footer:
return [
output_api.PresubmitError(
'Only one of {} or {} should be present in your CL description'
.format(_OUTAGE_ACTION_FOOTER, _IGNORE_OUTAGE_FOOTER)),
]
if not has_action_footer and not has_ignore_footer:
outages_config_lines = ['{}: {}'.format(k, v)
for k, v in sorted(outages_config.items())]
return [
output_api.PresubmitError('\n'.join([
'The following outages configuration is in effect:\n {}'.format(
'\n '.join(outages_config_lines)),
('The effect of your change may not be visible '
'in the generated configuration.'),
('If your change is addressing the outage, '
'please add the footer {} with a link for the outage.'
).format(_OUTAGE_ACTION_FOOTER),
('If your change is not addressing the outage '
'but you still wish to land it, please add the footer '
'{} with a reason.').format(_IGNORE_OUTAGE_FOOTER),
('For more information on outages configuration, '
'see https://chromium.googlesource.com/chromium/src/+/HEAD/infra/config/outages'
),
])),
]
return []
|
9
|
0
|
0.0
|
25
|
0
|
9
|
1.0
|
0
|
29
|
[
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 22,
"message": "Rename function \"CheckFreeze\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 22,
"endOffset": 15,
"startLine": 22,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 41,
"message": "Rename function \"CheckTests\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 41,
"endOffset": 14,
"startLine": 41,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 52,
"message": "Rename function \"CheckLintLuciMilo\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 52,
"endOffset": 21,
"startLine": 52,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 64,
"message": "Rename function \"CheckTestingBuildbot\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 64,
"endOffset": 24,
"startLine": 64,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 80,
"message": "Rename function \"CheckLucicfgGenOutputMain\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 80,
"endOffset": 29,
"startLine": 80,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 84,
"message": "Rename function \"CheckLucicfgGenOutputDev\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 84,
"endOffset": 28,
"startLine": 84,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 88,
"message": "Rename function \"CheckChangedLUCIConfigs\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 88,
"endOffset": 27,
"startLine": 88,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 100,
"message": "Rename function \"CheckOutagesConfigOnCommit\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 100,
"endOffset": 30,
"startLine": 100,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 100,
"message": "Refactor this function to reduce its Cognitive Complexity from 18 to the 15 allowed.",
"textRange": {
"endLine": 100,
"endOffset": 30,
"startLine": 100,
"startOffset": 4
},
"type": "CODE_SMELL"
}
] | 397 |
tests/unit/utils/test_minion.py
|
tomdoherty/salt
| 9,425 |
6562654
|
<filename>tests/unit/utils/test_minion.py<gh_stars>1000+
import logging
import salt.utils.minion
from tests.support.mock import MagicMock, mock_open, patch
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class FakeThreadingClass:
name = "thread-name"
class MinionUtilTestCase(TestCase):
"""
TestCase for salt.utils.minion
"""
def test__read_proc_file_multiprocessing_false(self):
"""
test get_minion_pillar when
target exists
"""
opts = {"multiprocessing": False}
proc_data = {
"tgt_type": "glob",
"jid": "20200310230030623022",
"tgt": "minion",
"pid": 12345,
"ret": "",
"user": "root",
"arg": [10],
"fun": "test.sleep",
}
fake_thread = FakeThreadingClass()
fake_thread.name = "20200310230030623022-Job-20200310230030623022"
with patch("os.getpid", MagicMock(return_value=12345)):
with patch("salt.utils.files.fopen", mock_open(read_data=b"msgpack")):
with patch("salt.payload.loads", MagicMock(return_value=proc_data)):
with patch(
"salt.utils.process.os_is_running", MagicMock(return_value=True)
):
with patch(
"threading.enumerate", MagicMock(return_value=[fake_thread])
):
with patch(
"salt.utils.minion._check_cmdline",
MagicMock(return_value=True),
):
data = salt.utils.minion._read_proc_file(
"/var/cache/salt/minion/proc/20200310230030623022",
opts,
)
self.assertEqual(data, proc_data)
opts = {"multiprocessing": False}
proc_data = {
"tgt_type": "glob",
"jid": "20200310230030623022",
"tgt": "minion",
"pid": 12345,
"ret": "",
"user": "root",
"arg": [10],
"fun": "test.sleep",
}
fake_thread = FakeThreadingClass()
fake_thread.name = "20200310230030623022"
with patch("os.getpid", MagicMock(return_value=12345)):
with patch("salt.utils.files.fopen", mock_open(read_data=b"msgpack")):
with patch("salt.payload.loads", MagicMock(return_value=proc_data)):
with patch(
"salt.utils.process.os_is_running", MagicMock(return_value=True)
):
with patch(
"threading.enumerate", MagicMock(return_value=[fake_thread])
):
with patch(
"salt.utils.minion._check_cmdline",
MagicMock(return_value=True),
):
data = salt.utils.minion._read_proc_file(
"/var/cache/salt/minion/proc/20200310230030623022",
opts,
)
self.assertEqual(data, proc_data)
opts = {"multiprocessing": False}
proc_data = {
"tgt_type": "glob",
"jid": "20200310230030623022",
"tgt": "minion",
"pid": 12345,
"ret": "",
"user": "root",
"arg": [10],
"fun": "test.sleep",
}
fake_thread = FakeThreadingClass()
fake_thread.name = "20200310230030623022"
with patch("os.getpid", MagicMock(return_value=12345)):
with patch("salt.utils.files.fopen", mock_open(read_data=b"msgpack")):
with patch("salt.payload.loads", MagicMock(return_value=proc_data)):
with patch(
"salt.utils.process.os_is_running", MagicMock(return_value=True)
):
with patch(
"threading.enumerate", MagicMock(return_value=[fake_thread])
):
with patch(
"salt.utils.minion._check_cmdline",
MagicMock(return_value=False),
):
with patch("os.remove", MagicMock(return_value=True)):
data = salt.utils.minion._read_proc_file(
"/var/cache/salt/minion/proc/20200310230030623022",
opts,
)
self.assertEqual(data, None)
def test__read_proc_file_multiprocessing_true(self):
"""
test get_minion_pillar when
target exists
"""
opts = {"multiprocessing": True}
proc_data = {
"tgt_type": "glob",
"jid": "20200310230030623022",
"tgt": "minion",
"pid": 12345,
"ret": "",
"user": "root",
"arg": [10],
"fun": "test.sleep",
}
with patch("os.getpid", MagicMock(return_value=12345)):
with patch("salt.utils.files.fopen", mock_open(read_data=b"msgpack")):
with patch("salt.payload.loads", MagicMock(return_value=proc_data)):
with patch(
"salt.utils.process.os_is_running", MagicMock(return_value=True)
):
with patch(
"salt.utils.minion._check_cmdline",
MagicMock(return_value=True),
):
data = salt.utils.minion._read_proc_file(
"/var/cache/salt/minion/proc/20200310230030623022", opts
)
self.assertEqual(data, None)
| null | null | null | null | null | null | null | null | null |
[] | 4,450 |
diagrams/outscale/__init__.py
|
analyticsftw/diagrams
| 17,037 |
6959
|
<reponame>analyticsftw/diagrams<filename>diagrams/outscale/__init__.py
from diagrams import Node
class _Outscale(Node):
_provider = "outscale"
_icon_dir = "resources/outscale"
fontcolor = "#ffffff"
|
0
|
0
|
0.0
|
0
|
0
|
0
|
1.0
|
0
|
0
|
[] | 62 |
tests/unit/test_fingerprint.py
|
flovouin/dash
| 17,143 |
2194014
|
<reponame>flovouin/dash<gh_stars>1000+
from dash.fingerprint import build_fingerprint, check_fingerprint
version = 1
hash_value = 1
valid_resources = [
{"path": "[email protected]", "fingerprint": "[email protected]"},
{
"path": "[email protected]",
"fingerprint": "[email protected]_1_1m1234567890abcdef.8.6.min.js",
"version": "1.1.1",
"hash": "1234567890abcdef",
},
{
"path": "[email protected]",
"fingerprint": "[email protected]_1_1-alpha_1m1234567890abcdef.8.6.min.js",
"version": "1.1.1-alpha.1",
"hash": "1234567890abcdef",
},
{
"path": "[email protected]",
"fingerprint": "[email protected]_1_1-alpha_x_y_y_X_Y_Z_1_2_3_metadata_xx_yy_zz_XX_YY_ZZ_11_22_33_mmm1234567890abcdefABCDEF.8.6.min.js",
"version": "1.1.1-alpha.x.y.y.X.Y.Z.1.2.3+metadata.xx.yy.zz.XX.YY.ZZ.11.22.33.mm",
"hash": "1234567890abcdefABCDEF",
},
{"path": "dash.plotly.js", "fingerprint": "dash.v1m1.plotly.js"},
{"path": "dash.plotly.j_s", "fingerprint": "dash.v1m1.plotly.j_s"},
{"path": "dash.plotly.css", "fingerprint": "dash.v1m1.plotly.css"},
{"path": "dash.plotly.xxx.yyy.zzz", "fingerprint": "dash.v1m1.plotly.xxx.yyy.zzz"},
{"path": "dash~plotly.js", "fingerprint": "dash~plotly.v1m1.js"},
{"path": "nested/folder/file.js", "fingerprint": "nested/folder/file.v1m1.js"},
{
# kind of pathological, but we have what looks like a version string
# in a different place - still works
"path": "nested.v2m2/folder/file.js",
"fingerprint": "nested.v2m2/folder/file.v1m1.js",
},
{
# even works if it gets doubled up in the right place
"path": "nested/folder/file.v2m2.js",
"fingerprint": "nested/folder/file.v1m1.v2m2.js",
},
{
"path": "nested.dotted/folder.structure/file.name.css",
"fingerprint": "nested.dotted/folder.structure/file.v1m1.name.css",
},
{
"path": "dash..plotly.js",
"fingerprint": "dash.v1_1_1m1234567890..plotly.js",
"version": "1.1.1",
"hash": "1234567890",
},
{
"path": "dash.",
"fingerprint": "dash.v1_1_1m1234567890.",
"version": "1.1.1",
"hash": "1234567890",
},
{
"path": "dash..",
"fingerprint": "dash.v1_1_1m1234567890..",
"version": "1.1.1",
"hash": "1234567890",
},
{
"path": "dash.js.",
"fingerprint": "dash.v1_1_1m1234567890.js.",
"version": "1.1.1",
"hash": "1234567890",
},
{
"path": "dash.j-s",
"fingerprint": "dash.v1_1_1m1234567890.j-s",
"version": "1.1.1",
"hash": "1234567890",
},
]
valid_fingerprints = [
"[email protected]_1_2m1571771240.8.6.min.js",
"dash.v1_1_1m1234567890.plotly.js",
"dash.v1_1_1m1234567890.plotly.j_s",
"dash.v1_1_1m1234567890.plotly.css",
"dash.v1_1_1m1234567890.plotly.xxx.yyy.zzz",
"dash.v1_1_1-alpha1m1234567890.plotly.js",
"dash.v1_1_1-alpha_3m1234567890.plotly.js",
"dash.v1_1_1m1234567890123.plotly.js",
"dash.v1_1_1m4bc3.plotly.js",
"dash~plotly.v1m1.js",
"nested/folder/file.v1m1.js",
"nested.dotted/folder.structure/file.v1m1.name.css",
# this one has a pattern that looks like the version string in the wrong place
# AND one in the right place.
"nested.v2m2/folder/file.v1m1.js",
"nested.v2m2.dotted/folder.structure/file.v1m1.name.css",
]
invalid_fingerprints = [
"dash.plotly.v1_1_1m1234567890.js",
"folder/dash.plotly.v1_1_1m1234567890.js",
"nested.v1m1/folder/file.js",
"nested.v1m1.dotted/folder.structure/file.name.css",
]
def test_fingerprint():
for resource in valid_resources:
# The fingerprint matches expectations
fingerprint = build_fingerprint(
resource.get("path"),
resource.get("version", version),
resource.get("hash", hash_value),
)
assert fingerprint == resource.get("fingerprint")
(original_path, has_fingerprint) = check_fingerprint(fingerprint)
# The inverse operation returns that the fingerprint was valid
# and the original path
assert has_fingerprint
assert original_path == resource.get("path")
for resource in valid_fingerprints:
(_, has_fingerprint) = check_fingerprint(resource)
assert has_fingerprint, resource
for resource in invalid_fingerprints:
(_, has_fingerprint) = check_fingerprint(resource)
assert not has_fingerprint, resource
|
1
|
0
|
0.0
|
3
|
0
|
1
|
1.0
|
0
|
4
|
[
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 7,
"message": "Define a constant instead of duplicating this literal \"[email protected]\" 4 times.",
"textRange": {
"endLine": 7,
"endOffset": 34,
"startLine": 7,
"startOffset": 13
},
"type": "CODE_SMELL"
}
] | 10,831 |
venv/lib/python3.7/site-packages/pyasn1_modules/rfc7773.py
|
nicholasadamou/StockBird
| 9,953 |
8450729
|
<gh_stars>1000+
#
# This file is part of pyasn1-modules software.
#
# Created by <NAME> with some assistance from asn1ate v.0.6.0.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# Authentication Context Certificate Extension
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc7773.txt
#
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import univ
from pyasn1_modules import rfc5280
MAX = float('inf')
# Authentication Context Extension
e_legnamnden = univ.ObjectIdentifier('1.2.752.201')
id_eleg_ce = e_legnamnden + (5, )
id_ce_authContext = id_eleg_ce + (1, )
class AuthenticationContext(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('contextType', char.UTF8String()),
namedtype.OptionalNamedType('contextInfo', char.UTF8String())
)
class AuthenticationContexts(univ.SequenceOf):
componentType = AuthenticationContext()
subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
# Map of Certificate Extension OIDs to Extensions added to the
# ones that are in rfc5280.py
_certificateExtensionsMapUpdate = {
id_ce_authContext: AuthenticationContexts(),
}
rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
|
0
|
0
|
0.0
|
0
|
0
|
0
|
1.0
|
0
|
0
|
[] | 10,967 |
torch/quantization/quantize_jit.py
|
xiaohanhuang/pytorch
| 60,067 |
4016575
|
<reponame>xiaohanhuang/pytorch<gh_stars>1000+
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/quantize_jit.py`, while adding an import statement
here.
"""
from torch.ao.quantization.quantize_jit import (
_check_is_script_module,
_check_forward_method,
script_qconfig,
script_qconfig_dict,
fuse_conv_bn_jit,
_prepare_jit,
prepare_jit,
prepare_dynamic_jit,
_convert_jit,
convert_jit,
convert_dynamic_jit,
_quantize_jit,
quantize_jit,
quantize_dynamic_jit
)
|
0
|
0
|
0.0
|
0
|
0
|
0
|
1.0
|
0
|
0
|
[] | 12,511 |
basic/30_copy_deepcopy.py
|
subshine/tutorials
| 10,786 |
11309112
|
# View more python learning tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
import copy
a = [1,2,3]
b = a
b[1]=22
print(a)
print(id(a) == id(b))
# deep copy
c = copy.deepcopy(a)
print(id(a) == id(c))
c[1] = 2
print(a)
a[1] = 111
print(c)
# shallow copy
a = [1,2,[3,4]]
d = copy.copy(a)
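# The shallow copy creates a new outer list (the ids differ) but shares
# the nested list object, so the second comparison prints True.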
print(id(a) == id(d))
print(id(a[2]) == id(d[2]))
| null | null | null | null | null | null | null | null | null |
[] | 6,919 |
dir_test.py
|
nicetone/Python
| 28,321 |
3236300
|
# Script Name : dir_test.py
# Author : <NAME>
# Created : 29th November 2011
# Last Modified : by- <NAME> 05 Oct 2020
# Version : 1.0
# Modifications :
# Description : Checks whether a given directory exists; if it does not, it offers to create the directory for you.
from __future__ import print_function
import os
try:
    input = raw_input  # Python 2: alias raw_input; on Python 3 this raises NameError and the built-in input is kept
except NameError:
pass
def main():
CheckDir = input("Enter the name of the directory to check : ")
print()
if os.path.exists(CheckDir): # Checks if the dir exists
print("The directory exists")
else:
print("No directory found for " + CheckDir) # Output if no directory
print()
        option = input("Would you like this directory created? y/n: ")
if option == 'n':
print("Goodbye")
exit()
if option == 'y':
os.makedirs(CheckDir) # Creates a new dir for the given name
print("Directory created for " + CheckDir)
else:
print("Not an option. Exiting")
exit()
if __name__ == '__main__':
main()
| null | null | null | null | null | null | null | null | null |
[] | 2,265 |
homeassistant/components/limitlessled/__init__.py
|
domwillcode/home-assistant
| 30,023 |
12856798
|
"""The limitlessled component."""
| null | null | null | null | null | null | null | null | null |
[] | 7,583 |
tests/components/econet/__init__.py
|
tbarbette/core
| 30,023 |
186790
|
<gh_stars>1000+
"""Tests for the Econet component."""
|
0
|
0
|
0.0
|
0
|
0
|
0
|
1.0
|
0
|
0
|
[] | 1,136 |
algorithms/tree/fenwick_tree/fenwick_tree.py
|
hbqdev/algorithms
| 22,426 |
11275911
|
"""
Fenwick Tree / Binary Indexed Tree
Suppose we have an array arr[0..n-1]. We would like to
1. Compute the sum of the first i elements.
2. Modify the value of a specified element of the array arr[i] = x where 0 <= i <= n-1.
A simple solution is to run a loop from 0 to i-1 and calculate the sum of the elements. To update a value, simply do arr[i] = x.
The first operation takes O(n) time and the second operation takes O(1) time.
Another simple solution is to create an extra array and store the sum of the first i-th elements at the i-th index in this new array.
The sum of a given range can now be calculated in O(1) time, but the update operation takes O(n) time now.
This works well if there are a large number of query operations but very few update operations.
There are two solutions that can perform both the query and update operations in O(logn) time.
1. Fenwick Tree
2. Segment Tree
Compared with Segment Tree, Binary Indexed Tree requires less space and is easier to implement.
"""
class Fenwick_Tree(object):
def __init__(self, freq):
self.arr = freq
self.n = len(freq)
def get_sum(self, bit_tree, i):
"""
Returns sum of arr[0..index]. This function assumes that the array is preprocessed and partial sums of array elements are stored in bit_tree[].
"""
s = 0
# index in bit_tree[] is 1 more than the index in arr[]
i = i+1
# Traverse ancestors of bit_tree[index]
while i > 0:
# Add current element of bit_tree to sum
s += bit_tree[i]
# Move index to parent node in getSum View
i -= i & (-i)
return s
def update_bit(self, bit_tree, i, v):
"""
Updates a node in Binary Index Tree (bit_tree) at given index in bit_tree. The given value 'val' is added to bit_tree[i] and all of its ancestors in tree.
"""
        # index in bit_tree[] is 1 more than the index in arr[]
i += 1
# Traverse all ancestors and add 'val'
while i <= self.n:
# Add 'val' to current node of bit_tree
bit_tree[i] += v
# Update index to that of parent in update View
i += i & (-i)
def construct(self):
"""
Constructs and returns a Binary Indexed Tree for given array of size n.
"""
        # Create and initialize bit_tree[] as 0
bit_tree = [0]*(self.n+1)
        # Store the actual values in bit_tree[] using update_bit()
for i in range(self.n):
self.update_bit(bit_tree, i, self.arr[i])
return bit_tree
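# A minimal usage sketch (added for illustration, not part of the original
# file); the array, indices and expected sums below are only an example.
if __name__ == '__main__':
    freq = [2, 1, 1, 3, 2, 3, 4, 5, 6, 7, 8, 9]
    ft = Fenwick_Tree(freq)
    bit_tree = ft.construct()
    print(ft.get_sum(bit_tree, 5))   # 2+1+1+3+2+3 = 12
    ft.update_bit(bit_tree, 3, 6)    # arr[3] += 6
    print(ft.get_sum(bit_tree, 5))   # 18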
|
1
|
0
|
0.0
|
3
|
0
|
1
|
1.0
|
0
|
7
|
[
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 22,
"message": "Rename class \"Fenwick_Tree\" to match the regular expression ^_?([A-Z_][a-zA-Z0-9]*|[a-z_][a-z0-9_]*)$.",
"textRange": {
"endLine": 22,
"endOffset": 18,
"startLine": 22,
"startOffset": 6
},
"type": "CODE_SMELL"
}
] | 6,738 |
recipes/Python/300304_Live_Object_Browser/recipe-300304.py
|
tdiprima/code
| 2,023 |
6466906
|
#!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gtk
class Browser:
def make_row( self, piter, name, value ):
info = repr(value)
if not hasattr(value, "__dict__"):
if len(info) > 80:
# it's a big list, or dict etc.
info = info[:80] + "..."
_piter = self.treestore.append( piter, [ name, type(value).__name__, info ] )
return _piter
def make_instance( self, value, piter ):
if hasattr( value, "__dict__" ):
for _name, _value in value.__dict__.items():
_piter = self.make_row( piter, "."+_name, _value )
_path = self.treestore.get_path( _piter )
self.otank[ _path ] = (_name, _value)
def make_mapping( self, value, piter ):
keys = []
if hasattr( value, "keys" ):
keys = value.keys()
elif hasattr( value, "__len__"):
keys = range( len(value) )
for key in keys:
_name = "[%s]"%str(key)
_piter = self.make_row( piter, _name, value[key] )
_path = self.treestore.get_path( _piter )
self.otank[ _path ] = (_name, value[key])
def make(self, name=None, value=None, path=None, depth=1):
if path is None:
# make root node
piter = self.make_row( None, name, value )
path = self.treestore.get_path( piter )
self.otank[ path ] = (name, value)
else:
name, value = self.otank[ path ]
piter = self.treestore.get_iter( path )
if not self.treestore.iter_has_child( piter ):
self.make_mapping( value, piter )
self.make_instance( value, piter )
if depth:
for i in range( self.treestore.iter_n_children( piter ) ):
self.make( path = path+(i,), depth = depth - 1 )
def row_expanded( self, treeview, piter, path ):
self.make( path = path )
def delete_event(self, widget, event, data=None):
gtk.main_quit()
return gtk.FALSE
def __init__(self, name, value):
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title("Browser")
self.window.set_size_request(512, 320)
self.window.connect("delete_event", self.delete_event)
# we will store the name, the type name, and the repr
columns = [str,str,str]
self.treestore = gtk.TreeStore(*columns)
# the otank tells us what object we put at each node in the tree
self.otank = {} # map path -> (name,value)
self.make( name, value )
self.treeview = gtk.TreeView(self.treestore)
self.treeview.connect("row-expanded", self.row_expanded )
self.tvcolumns = [ gtk.TreeViewColumn() for _type in columns ]
i = 0
for tvcolumn in self.tvcolumns:
self.treeview.append_column(tvcolumn)
cell = gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
tvcolumn.add_attribute(cell, 'text', i)
i = i + 1
self.window.add(self.treeview)
self.window.show_all()
def dump( name, value ):
browser = Browser( name, value )
gtk.main()
def test():
class Nil:
pass
a = Nil()
b = Nil()
c = Nil()
d = Nil()
a.b=b
b.c=c
c.d=d
d.a=a # circular chain
dump( "a", a )
if __name__ == "__main__":
test()
|
2
|
0
|
0.0
|
17
|
0
|
2
|
1.0
|
0
|
21
|
[
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 11,
"message": "Merge this if statement with the enclosing one.",
"textRange": {
"endLine": 11,
"endOffset": 14,
"startLine": 11,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 91,
"message": "Remove the unused local variable \"browser\".",
"textRange": {
"endLine": 91,
"endOffset": 11,
"startLine": 91,
"startOffset": 4
},
"type": "CODE_SMELL"
}
] | 4,123 |
tests/components/iotawatt/__init__.py
|
MrDelik/core
| 30,023 |
1738332
|
<gh_stars>1000+
"""Tests for the IoTaWatt integration."""
from iotawattpy.sensor import Sensor
INPUT_SENSOR = Sensor(
channel="1",
base_name="My Sensor",
suffix=None,
io_type="Input",
unit="Watts",
value=23,
begin="",
mac_addr="mock-mac",
)
OUTPUT_SENSOR = Sensor(
channel="N/A",
base_name="My WattHour Sensor",
suffix=None,
io_type="Output",
unit="WattHours",
value=243,
begin="",
mac_addr="mock-mac",
fromStart=True,
)
INPUT_ACCUMULATED_SENSOR = Sensor(
channel="N/A",
base_name="My WattHour Accumulated Input Sensor",
suffix=".wh",
io_type="Input",
unit="WattHours",
value=500,
begin="",
mac_addr="mock-mac",
fromStart=False,
)
OUTPUT_ACCUMULATED_SENSOR = Sensor(
channel="N/A",
base_name="My WattHour Accumulated Output Sensor",
suffix=".wh",
io_type="Output",
unit="WattHours",
value=200,
begin="",
mac_addr="mock-mac",
fromStart=False,
)
|
0
|
0
|
0.0
|
0
|
0
|
0
|
1.0
|
0
|
0
|
[] | 1,867 |
recipes/Python/277600_one_liner_frequency_count/recipe-277600.py
|
tdiprima/code
| 2,023 |
3209145
|
>>> from itertools import groupby
>>> [(k, len(list(g))) for k, g in groupby(sorted(myList))]
[('1', 4), ('2', 1), ('3', 2), ('4', 1)]
|
0
|
0
|
0.0
| null |
0
|
0
|
1.0
|
0
| null |
[] | 2,049 |
utils/python_lint.py
|
jerodji/swift
| 72,551 |
8977665
|
<reponame>jerodji/swift
#!/usr/bin/env python3
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
"""
Utility script used to run the flake8 linter over all the project Python
sources.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import subprocess
import sys
__all__ = [
'lint',
]
# -----------------------------------------------------------------------------
# Constants
_UTILS_DIR = os.path.abspath(os.path.dirname(__file__))
_PROJECT_DIR = os.path.dirname(_UTILS_DIR)
_REQUIRED_PACKAGES = [
'flake8',
'flake8-import-order',
]
_INSTALL_FLAKE8_MESSAGE = """
The flake8 and flake8-import-order Python packages are required for linting,
but these were not found on your system.
You can install these using:
python -m pip install flake8
python -m pip install flake8-import-order
For more help, see http://flake8.pycqa.org.
"""
# -----------------------------------------------------------------------------
# Helpers
def _is_package_installed(name):
"""Runs the pip command to check if a package is installed.
"""
command = [
sys.executable,
'-m', 'pip',
'show', '--quiet',
name,
]
with open(os.devnull, 'w') as devnull:
status = subprocess.call(command, stderr=devnull)
return not status
# -----------------------------------------------------------------------------
def lint(args, verbose=False):
all_packages_installed = all([
_is_package_installed(name)
for name in _REQUIRED_PACKAGES
])
if not all_packages_installed:
if verbose:
print(_INSTALL_FLAKE8_MESSAGE)
return 1
return subprocess.call(
[sys.executable, '-m', 'flake8'] + args,
cwd=_PROJECT_DIR,
universal_newlines=True)
if __name__ == '__main__':
sys.exit(lint(sys.argv[1:], verbose=True))
|
0
|
0
|
0.0
|
4
|
0
|
0
|
1.0
|
0
|
5
|
[] | 13,109 |
manimlib/scene/three_d_scene.py
|
OrKedar/geo-manimgl-app
| 45,280 |
1679092
|
from manimlib.scene.scene import Scene
class ThreeDScene(Scene):
CONFIG = {
"camera_config": {
"samples": 4,
"anti_alias_width": 0,
}
}
def begin_ambient_camera_rotation(self, rate=0.02):
pass # TODO
def stop_ambient_camera_rotation(self):
pass # TODO
def move_camera(self,
phi=None,
theta=None,
distance=None,
gamma=None,
frame_center=None,
**kwargs):
pass # TODO
| null | null | null | null | null | null | null | null | null |
[] | 1,537 |
examples/pxScene2d/external/libnode-v10.15.3/deps/v8/tools/testrunner/outproc/test262.py
|
madanagopaltcomcast/pxCore
| 2,151 |
1022155
|
<gh_stars>1000+
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from . import base
class ExceptionOutProc(base.OutProc):
"""Output processor for tests with expected exception."""
def __init__(self, expected_outcomes, expected_exception=None):
super(ExceptionOutProc, self).__init__(expected_outcomes)
self._expected_exception = expected_exception
def _is_failure_output(self, output):
if output.exit_code != 0:
return True
if self._expected_exception != self._parse_exception(output.stdout):
return True
return 'FAILED!' in output.stdout
def _parse_exception(self, string):
# somefile:somelinenumber: someerror[: sometext]
# somefile might include an optional drive letter on windows e.g. "e:".
match = re.search(
        r'^(?:\w:)?[^:]*:[0-9]+: ([^: ]+?)($|: )', string, re.MULTILINE)
if match:
return match.group(1).strip()
else:
return None
# Module-level helper, assigned as the _is_failure_output method of the two
# classes defined below.
def _is_failure_output(self, output):
    return (
        output.exit_code != 0 or
        'FAILED!' in output.stdout
    )
class NoExceptionOutProc(base.OutProc):
"""Output processor optimized for tests without expected exception."""
NoExceptionOutProc._is_failure_output = _is_failure_output
class PassNoExceptionOutProc(base.PassOutProc):
"""
Output processor optimized for tests expected to PASS without expected
exception.
"""
PassNoExceptionOutProc._is_failure_output = _is_failure_output
PASS_NO_EXCEPTION = PassNoExceptionOutProc()
| null | null | null | null | null | null | null | null | null |
[] | 13,218 |
tools/make_dist_html.py
|
libcat/libuv
| 20,206 |
11593868
|
#!/usr/bin/python3
import itertools
import os
import re
import subprocess
HTML = r'''
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="http://libuv.org/styles/vendor.css">
<link rel="stylesheet" href="http://libuv.org/styles/main.css">
<style>
table {{
border-spacing: 0;
}}
body table {{
margin: 0 0 0 12pt;
}}
th, td {{
padding: 2pt;
text-align: left;
vertical-align: top;
}}
table table {{
border-collapse: initial;
padding: 0 0 16pt 0;
}}
table table tr:nth-child(even) {{
background-color: #777;
}}
</style>
</head>
<body>
<table>{groups}</table>
</body>
</html>
'''
GROUPS = r'''
<tr>
<td>{groups[0]}</td>
<td>{groups[1]}</td>
<td>{groups[2]}</td>
<td>{groups[3]}</td>
</tr>
'''
GROUP = r'''
<table>
<tr>
<th>version</th>
<th>tarball</th>
<th>gpg</th>
<th>windows</th>
</tr>
{rows}
</table>
'''
ROW = r'''
<tr>
<td>
<a href="http://dist.libuv.org/dist/{tag}/">{tag}</a>
</td>
<td>
<a href="http://dist.libuv.org/dist/{tag}/libuv-{tag}.tar.gz">tarball</a>
</td>
<td>{maybe_gpg}</td>
<td>{maybe_exe}</td>
</tr>
'''
GPG = r'''
<a href="http://dist.libuv.org/dist/{tag}/libuv-{tag}.tar.gz.sign">gpg</a>
'''
# The binaries don't have a predictable name, link to the directory instead.
EXE = r'''
<a href="http://dist.libuv.org/dist/{tag}/">exe</a>
'''
def version(tag):
    return list(map(int, re.match(r'^v(\d+)\.(\d+)\.(\d+)', tag).groups()))
def major_minor(tag):
return version(tag)[:2]
def row_for(tag):
maybe_gpg = ''
maybe_exe = ''
# We didn't start signing releases and producing Windows installers
# until v1.7.0.
if version(tag) >= version('v1.7.0'):
maybe_gpg = GPG.format(**locals())
maybe_exe = EXE.format(**locals())
return ROW.format(**locals())
def group_for(tags):
rows = ''.join(row_for(tag) for tag in tags)
return GROUP.format(rows=rows)
# Partition in groups of |n|.
def groups_for(groups, n=4):
html = ''
groups = groups[:] + [''] * (n - 1)
while len(groups) >= n:
html += GROUPS.format(groups=groups)
groups = groups[n:]
return html
if __name__ == '__main__':
os.chdir(os.path.dirname(__file__))
tags = subprocess.check_output(['git', 'tag'], text=True)
tags = [tag for tag in tags.split('\n') if tag.startswith('v')]
tags.sort(key=version, reverse=True)
groups = [group_for(list(g)) for _, g in itertools.groupby(tags, major_minor)]
groups = groups_for(groups)
html = HTML.format(groups=groups).strip()
html = re.sub('>\\s+<', '><', html)
print(html)
| null | null | null | null | null | null | null | null | null |
[] | 10,116 |
superset/dashboards/filter_sets/filters.py
|
razzius/superset
| 18,621 |
6474850
|
<reponame>razzius/superset
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, TYPE_CHECKING
from flask import g
from sqlalchemy import and_, or_
from superset.dashboards.filter_sets.consts import DASHBOARD_OWNER_TYPE, USER_OWNER_TYPE
from superset.models.dashboard import dashboard_user
from superset.models.filter_set import FilterSet
from superset.views.base import BaseFilter, is_user_admin
if TYPE_CHECKING:
from sqlalchemy.orm.query import Query
class FilterSetFilter(BaseFilter): # pylint: disable=too-few-public-methods)
def apply(self, query: Query, value: Any) -> Query:
if is_user_admin():
return query
current_user_id = g.user.id
filter_set_ids_by_dashboard_owners = ( # pylint: disable=C0103
query.from_self(FilterSet.id)
.join(dashboard_user, FilterSet.owner_id == dashboard_user.c.dashboard_id)
.filter(
and_(
FilterSet.owner_type == DASHBOARD_OWNER_TYPE,
dashboard_user.c.user_id == current_user_id,
)
)
)
return query.filter(
or_(
and_(
FilterSet.owner_type == USER_OWNER_TYPE,
FilterSet.owner_id == current_user_id,
),
FilterSet.id.in_(filter_set_ids_by_dashboard_owners),
)
)
|
0
|
0
|
0.0
|
2
|
0
|
0
|
1.0
|
0
|
3
|
[] | 4,162 |
recipes/Python/82345_pykill/recipe-82345.py
|
tdiprima/code
| 2,023 |
556028
|
<reponame>tdiprima/code
#!/usr/bin/env python
import commands
import os,sys,string
import re
def pkill():
if len(sys.argv) <= 1:
print "usage: " + sys.argv[0] + " process_name"
sys.exit(1)
rip = sys.argv[1]
me = commands.getoutput("whoami")
p = commands.getoutput("ps -u %s -o fname -o pid" % me)
ids = p.split("\n")
for id in ids:
if id.find(rip) < 0:
continue
regex = re.compile(r'(\d+).*',re.I)
id = regex.sub(r'\1', id)
print "killing: %s\n" % id
commands.getoutput("kill -15 %s" % id)
if commands.getoutput("ps -u %s -o fname -o pid" % me).find(id) > -1:
print "slaughtering: %s\n" % id
commands.getoutput("kill -9 %s" % id)
if __name__ == '__main__' :
pkill()
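# A hedged Python 3 sketch of the same idea (added for context, not part of
# the original recipe): list the current user's processes with ps and send
# SIGTERM to the matching ones. The helper name, the ps column format and the
# decision not to escalate to SIGKILL automatically are assumptions.
import getpass
import signal
import subprocess
def pkill3(name):
    user = getpass.getuser()
    out = subprocess.run(["ps", "-u", user, "-o", "comm=,pid="],
                         capture_output=True, text=True).stdout
    for line in out.splitlines():
        parts = line.split()
        if len(parts) < 2 or not parts[-1].isdigit():
            continue
        comm, pid = " ".join(parts[:-1]), int(parts[-1])
        if name in comm:
            os.kill(pid, signal.SIGTERM)  # use signal.SIGKILL only as a last resort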
| null | null | null | null | null | null | null | null | null |
[] | 10,662 |
recipes/Python/576370_Weighted_random_choice/recipe-576370.py
|
tdiprima/code
| 2,023 |
189115
|
<reponame>tdiprima/code<filename>recipes/Python/576370_Weighted_random_choice/recipe-576370.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import random, bisect
class ItemGenerator(object):
    '''Randomly chooses an element from a list.
    It does so not uniformly, but according to a given weight for
    each element.
Just instantiate this class passing a list of pairs
(item, weight), and then call it to get the items.
'''
def __init__(self, items):
self.puntos = []
self.ponderado = []
total = sum(x[1] for x in items)
acum = 0
for it,peso in items:
acum += peso
self.puntos.append(it)
self.ponderado.append(acum/total)
self.total = acum - 1
def __call__(self):
ind = random.random()
cual = bisect.bisect(self.ponderado, ind)
return self.puntos[cual]
if __name__ == "__main__":
    # This shows the usage and also tests the recipe: calling it
    # many times should return the elements in roughly the
    # given proportions.
items = (
("A", 10),
("B", 100),
("C", 5)
)
itgen = ItemGenerator(items)
cuenta = {}
for i in range(1000000):
item = itgen()
cuenta[item] = cuenta.get(item, 0) + 1
print cuenta
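# Added note (not part of the original recipe): on Python 3.6+ the standard
# library offers the same weighted selection directly via random.choices.
# A minimal sketch using the same items and weights as above:
import random
population = ["A", "B", "C"]
weights = [10, 100, 5]
sample = random.choices(population, weights=weights, k=10)  # ~proportional picks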
|
1
|
0
|
0.0
|
4
|
0
|
1
|
1.0
|
1
|
5
|
[
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 48,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 48,
"endOffset": 9,
"startLine": 48,
"startOffset": 4
},
"type": "CODE_SMELL"
}
] | 1,150 |
tests/components/minio/__init__.py
|
domwillcode/home-assistant
| 30,023 |
12862327
|
<reponame>domwillcode/home-assistant<gh_stars>1000+
"""Tests for the minio component."""
| null | null | null | null | null | null | null | null | null |
[] | 7,607 |
Lib/encodings/cp737.py
|
shawwn/cpython
| 52,316 |
11594412
|
""" Python Character Mapping Codec cp737 generated from 'VENDORS/MICSFT/PC/CP737.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp737',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x0081: 0x0392, # GREEK CAPITAL LETTER BETA
0x0082: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x0083: 0x0394, # GREEK CAPITAL LETTER DELTA
0x0084: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x0085: 0x0396, # GREEK CAPITAL LETTER ZETA
0x0086: 0x0397, # GREEK CAPITAL LETTER ETA
0x0087: 0x0398, # GREEK CAPITAL LETTER THETA
0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA
0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x008b: 0x039c, # GREEK CAPITAL LETTER MU
0x008c: 0x039d, # GREEK CAPITAL LETTER NU
0x008d: 0x039e, # GREEK CAPITAL LETTER XI
0x008e: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x008f: 0x03a0, # GREEK CAPITAL LETTER PI
0x0090: 0x03a1, # GREEK CAPITAL LETTER RHO
0x0091: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x0092: 0x03a4, # GREEK CAPITAL LETTER TAU
0x0093: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x0094: 0x03a6, # GREEK CAPITAL LETTER PHI
0x0095: 0x03a7, # GREEK CAPITAL LETTER CHI
0x0096: 0x03a8, # GREEK CAPITAL LETTER PSI
0x0097: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x0098: 0x03b1, # GREEK SMALL LETTER ALPHA
0x0099: 0x03b2, # GREEK SMALL LETTER BETA
0x009a: 0x03b3, # GREEK SMALL LETTER GAMMA
0x009b: 0x03b4, # GREEK SMALL LETTER DELTA
0x009c: 0x03b5, # GREEK SMALL LETTER EPSILON
0x009d: 0x03b6, # GREEK SMALL LETTER ZETA
0x009e: 0x03b7, # GREEK SMALL LETTER ETA
0x009f: 0x03b8, # GREEK SMALL LETTER THETA
0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA
0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00a3: 0x03bc, # GREEK SMALL LETTER MU
0x00a4: 0x03bd, # GREEK SMALL LETTER NU
0x00a5: 0x03be, # GREEK SMALL LETTER XI
0x00a6: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00a7: 0x03c0, # GREEK SMALL LETTER PI
0x00a8: 0x03c1, # GREEK SMALL LETTER RHO
0x00a9: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00aa: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00ab: 0x03c4, # GREEK SMALL LETTER TAU
0x00ac: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00ad: 0x03c6, # GREEK SMALL LETTER PHI
0x00ae: 0x03c7, # GREEK SMALL LETTER CHI
0x00af: 0x03c8, # GREEK SMALL LETTER PSI
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00e1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00e2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00e3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00e4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00e5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00e6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00e7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00e8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00e9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00ea: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00eb: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00ec: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00ed: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00ee: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00ef: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00f0: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00f5: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u0391' # 0x0080 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0x0081 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0x0082 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0x0083 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0x0084 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0x0085 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0x0086 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0x0087 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0x0088 -> GREEK CAPITAL LETTER IOTA
'\u039a' # 0x0089 -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0x008b -> GREEK CAPITAL LETTER MU
'\u039d' # 0x008c -> GREEK CAPITAL LETTER NU
'\u039e' # 0x008d -> GREEK CAPITAL LETTER XI
'\u039f' # 0x008e -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0x008f -> GREEK CAPITAL LETTER PI
'\u03a1' # 0x0090 -> GREEK CAPITAL LETTER RHO
'\u03a3' # 0x0091 -> GREEK CAPITAL LETTER SIGMA
'\u03a4' # 0x0092 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0x0093 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0x0094 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0x0095 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0x0096 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0x0097 -> GREEK CAPITAL LETTER OMEGA
'\u03b1' # 0x0098 -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0x0099 -> GREEK SMALL LETTER BETA
'\u03b3' # 0x009a -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0x009b -> GREEK SMALL LETTER DELTA
'\u03b5' # 0x009c -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0x009d -> GREEK SMALL LETTER ZETA
'\u03b7' # 0x009e -> GREEK SMALL LETTER ETA
'\u03b8' # 0x009f -> GREEK SMALL LETTER THETA
'\u03b9' # 0x00a0 -> GREEK SMALL LETTER IOTA
'\u03ba' # 0x00a1 -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0x00a3 -> GREEK SMALL LETTER MU
'\u03bd' # 0x00a4 -> GREEK SMALL LETTER NU
'\u03be' # 0x00a5 -> GREEK SMALL LETTER XI
'\u03bf' # 0x00a6 -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0x00a7 -> GREEK SMALL LETTER PI
'\u03c1' # 0x00a8 -> GREEK SMALL LETTER RHO
'\u03c3' # 0x00a9 -> GREEK SMALL LETTER SIGMA
'\u03c2' # 0x00aa -> GREEK SMALL LETTER FINAL SIGMA
'\u03c4' # 0x00ab -> GREEK SMALL LETTER TAU
'\u03c5' # 0x00ac -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0x00ad -> GREEK SMALL LETTER PHI
'\u03c7' # 0x00ae -> GREEK SMALL LETTER CHI
'\u03c8' # 0x00af -> GREEK SMALL LETTER PSI
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03c9' # 0x00e0 -> GREEK SMALL LETTER OMEGA
'\u03ac' # 0x00e1 -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0x00e2 -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0x00e3 -> GREEK SMALL LETTER ETA WITH TONOS
'\u03ca' # 0x00e4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03af' # 0x00e5 -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03cc' # 0x00e6 -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0x00e7 -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03cb' # 0x00e8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03ce' # 0x00e9 -> GREEK SMALL LETTER OMEGA WITH TONOS
'\u0386' # 0x00ea -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\u0388' # 0x00eb -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0x00ec -> GREEK CAPITAL LETTER ETA WITH TONOS
'\u038a' # 0x00ed -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\u038c' # 0x00ee -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\u038e' # 0x00ef -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0x00f0 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u03aa' # 0x00f4 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0x00f5 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b7: 0x00fa, # MIDDLE DOT
0x00f7: 0x00f6, # DIVISION SIGN
0x0386: 0x00ea, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0388: 0x00eb, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x0389: 0x00ec, # GREEK CAPITAL LETTER ETA WITH TONOS
0x038a: 0x00ed, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x038c: 0x00ee, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x038e: 0x00ef, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x038f: 0x00f0, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0391: 0x0080, # GREEK CAPITAL LETTER ALPHA
0x0392: 0x0081, # GREEK CAPITAL LETTER BETA
0x0393: 0x0082, # GREEK CAPITAL LETTER GAMMA
0x0394: 0x0083, # GREEK CAPITAL LETTER DELTA
0x0395: 0x0084, # GREEK CAPITAL LETTER EPSILON
0x0396: 0x0085, # GREEK CAPITAL LETTER ZETA
0x0397: 0x0086, # GREEK CAPITAL LETTER ETA
0x0398: 0x0087, # GREEK CAPITAL LETTER THETA
0x0399: 0x0088, # GREEK CAPITAL LETTER IOTA
0x039a: 0x0089, # GREEK CAPITAL LETTER KAPPA
0x039b: 0x008a, # GREEK CAPITAL LETTER LAMDA
0x039c: 0x008b, # GREEK CAPITAL LETTER MU
0x039d: 0x008c, # GREEK CAPITAL LETTER NU
0x039e: 0x008d, # GREEK CAPITAL LETTER XI
0x039f: 0x008e, # GREEK CAPITAL LETTER OMICRON
0x03a0: 0x008f, # GREEK CAPITAL LETTER PI
0x03a1: 0x0090, # GREEK CAPITAL LETTER RHO
0x03a3: 0x0091, # GREEK CAPITAL LETTER SIGMA
0x03a4: 0x0092, # GREEK CAPITAL LETTER TAU
0x03a5: 0x0093, # GREEK CAPITAL LETTER UPSILON
0x03a6: 0x0094, # GREEK CAPITAL LETTER PHI
0x03a7: 0x0095, # GREEK CAPITAL LETTER CHI
0x03a8: 0x0096, # GREEK CAPITAL LETTER PSI
0x03a9: 0x0097, # GREEK CAPITAL LETTER OMEGA
0x03aa: 0x00f4, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x03ab: 0x00f5, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x03ac: 0x00e1, # GREEK SMALL LETTER ALPHA WITH TONOS
0x03ad: 0x00e2, # GREEK SMALL LETTER EPSILON WITH TONOS
0x03ae: 0x00e3, # GREEK SMALL LETTER ETA WITH TONOS
0x03af: 0x00e5, # GREEK SMALL LETTER IOTA WITH TONOS
0x03b1: 0x0098, # GREEK SMALL LETTER ALPHA
0x03b2: 0x0099, # GREEK SMALL LETTER BETA
0x03b3: 0x009a, # GREEK SMALL LETTER GAMMA
0x03b4: 0x009b, # GREEK SMALL LETTER DELTA
0x03b5: 0x009c, # GREEK SMALL LETTER EPSILON
0x03b6: 0x009d, # GREEK SMALL LETTER ZETA
0x03b7: 0x009e, # GREEK SMALL LETTER ETA
0x03b8: 0x009f, # GREEK SMALL LETTER THETA
0x03b9: 0x00a0, # GREEK SMALL LETTER IOTA
0x03ba: 0x00a1, # GREEK SMALL LETTER KAPPA
0x03bb: 0x00a2, # GREEK SMALL LETTER LAMDA
0x03bc: 0x00a3, # GREEK SMALL LETTER MU
0x03bd: 0x00a4, # GREEK SMALL LETTER NU
0x03be: 0x00a5, # GREEK SMALL LETTER XI
0x03bf: 0x00a6, # GREEK SMALL LETTER OMICRON
0x03c0: 0x00a7, # GREEK SMALL LETTER PI
0x03c1: 0x00a8, # GREEK SMALL LETTER RHO
0x03c2: 0x00aa, # GREEK SMALL LETTER FINAL SIGMA
0x03c3: 0x00a9, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00ab, # GREEK SMALL LETTER TAU
0x03c5: 0x00ac, # GREEK SMALL LETTER UPSILON
0x03c6: 0x00ad, # GREEK SMALL LETTER PHI
0x03c7: 0x00ae, # GREEK SMALL LETTER CHI
0x03c8: 0x00af, # GREEK SMALL LETTER PSI
0x03c9: 0x00e0, # GREEK SMALL LETTER OMEGA
0x03ca: 0x00e4, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x03cb: 0x00e8, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x03cc: 0x00e6, # GREEK SMALL LETTER OMICRON WITH TONOS
0x03cd: 0x00e7, # GREEK SMALL LETTER UPSILON WITH TONOS
0x03ce: 0x00e9, # GREEK SMALL LETTER OMEGA WITH TONOS
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| null | null | null | null | null | null | null | null | null |
[] | 10,120 |
homeassistant/util/color.py
|
andersop91/core
| 22,481 |
6562871
|
<gh_stars>1000+
"""Color util methods."""
from __future__ import annotations
import colorsys
import math
from typing import NamedTuple, cast
import attr
# mypy: disallow-any-generics
class RGBColor(NamedTuple):
"""RGB hex values."""
r: int
g: int
b: int
# Official CSS3 colors from w3.org:
# https://www.w3.org/TR/2010/PR-css3-color-20101028/#html4
# names do not have spaces in them so that we can compare against
# requests more easily (by removing spaces from the requests as well).
# This lets "dark seagreen" and "dark sea green" both match the same
# color "darkseagreen".
COLORS = {
"aliceblue": RGBColor(240, 248, 255),
"antiquewhite": RGBColor(250, 235, 215),
"aqua": RGBColor(0, 255, 255),
"aquamarine": RGBColor(127, 255, 212),
"azure": RGBColor(240, 255, 255),
"beige": RGBColor(245, 245, 220),
"bisque": RGBColor(255, 228, 196),
"black": RGBColor(0, 0, 0),
"blanchedalmond": RGBColor(255, 235, 205),
"blue": RGBColor(0, 0, 255),
"blueviolet": RGBColor(138, 43, 226),
"brown": RGBColor(165, 42, 42),
"burlywood": RGBColor(222, 184, 135),
"cadetblue": RGBColor(95, 158, 160),
"chartreuse": RGBColor(127, 255, 0),
"chocolate": RGBColor(210, 105, 30),
"coral": RGBColor(255, 127, 80),
"cornflowerblue": RGBColor(100, 149, 237),
"cornsilk": RGBColor(255, 248, 220),
"crimson": RGBColor(220, 20, 60),
"cyan": RGBColor(0, 255, 255),
"darkblue": RGBColor(0, 0, 139),
"darkcyan": RGBColor(0, 139, 139),
"darkgoldenrod": RGBColor(184, 134, 11),
"darkgray": RGBColor(169, 169, 169),
"darkgreen": RGBColor(0, 100, 0),
"darkgrey": RGBColor(169, 169, 169),
"darkkhaki": RGBColor(189, 183, 107),
"darkmagenta": RGBColor(139, 0, 139),
"darkolivegreen": RGBColor(85, 107, 47),
"darkorange": RGBColor(255, 140, 0),
"darkorchid": RGBColor(153, 50, 204),
"darkred": RGBColor(139, 0, 0),
"darksalmon": RGBColor(233, 150, 122),
"darkseagreen": RGBColor(143, 188, 143),
"darkslateblue": RGBColor(72, 61, 139),
"darkslategray": RGBColor(47, 79, 79),
"darkslategrey": RGBColor(47, 79, 79),
"darkturquoise": RGBColor(0, 206, 209),
"darkviolet": RGBColor(148, 0, 211),
"deeppink": RGBColor(255, 20, 147),
"deepskyblue": RGBColor(0, 191, 255),
"dimgray": RGBColor(105, 105, 105),
"dimgrey": RGBColor(105, 105, 105),
"dodgerblue": RGBColor(30, 144, 255),
"firebrick": RGBColor(178, 34, 34),
"floralwhite": RGBColor(255, 250, 240),
"forestgreen": RGBColor(34, 139, 34),
"fuchsia": RGBColor(255, 0, 255),
"gainsboro": RGBColor(220, 220, 220),
"ghostwhite": RGBColor(248, 248, 255),
"gold": RGBColor(255, 215, 0),
"goldenrod": RGBColor(218, 165, 32),
"gray": RGBColor(128, 128, 128),
"green": RGBColor(0, 128, 0),
"greenyellow": RGBColor(173, 255, 47),
"grey": RGBColor(128, 128, 128),
"honeydew": RGBColor(240, 255, 240),
"hotpink": RGBColor(255, 105, 180),
"indianred": RGBColor(205, 92, 92),
"indigo": RGBColor(75, 0, 130),
"ivory": RGBColor(255, 255, 240),
"khaki": RGBColor(240, 230, 140),
"lavender": RGBColor(230, 230, 250),
"lavenderblush": RGBColor(255, 240, 245),
"lawngreen": RGBColor(124, 252, 0),
"lemonchiffon": RGBColor(255, 250, 205),
"lightblue": RGBColor(173, 216, 230),
"lightcoral": RGBColor(240, 128, 128),
"lightcyan": RGBColor(224, 255, 255),
"lightgoldenrodyellow": RGBColor(250, 250, 210),
"lightgray": RGBColor(211, 211, 211),
"lightgreen": RGBColor(144, 238, 144),
"lightgrey": RGBColor(211, 211, 211),
"lightpink": RGBColor(255, 182, 193),
"lightsalmon": RGBColor(255, 160, 122),
"lightseagreen": RGBColor(32, 178, 170),
"lightskyblue": RGBColor(135, 206, 250),
"lightslategray": RGBColor(119, 136, 153),
"lightslategrey": RGBColor(119, 136, 153),
"lightsteelblue": RGBColor(176, 196, 222),
"lightyellow": RGBColor(255, 255, 224),
"lime": RGBColor(0, 255, 0),
"limegreen": RGBColor(50, 205, 50),
"linen": RGBColor(250, 240, 230),
"magenta": RGBColor(255, 0, 255),
"maroon": RGBColor(128, 0, 0),
"mediumaquamarine": RGBColor(102, 205, 170),
"mediumblue": RGBColor(0, 0, 205),
"mediumorchid": RGBColor(186, 85, 211),
"mediumpurple": RGBColor(147, 112, 219),
"mediumseagreen": RGBColor(60, 179, 113),
"mediumslateblue": RGBColor(123, 104, 238),
"mediumspringgreen": RGBColor(0, 250, 154),
"mediumturquoise": RGBColor(72, 209, 204),
"mediumvioletred": RGBColor(199, 21, 133),
"midnightblue": RGBColor(25, 25, 112),
"mintcream": RGBColor(245, 255, 250),
"mistyrose": RGBColor(255, 228, 225),
"moccasin": RGBColor(255, 228, 181),
"navajowhite": RGBColor(255, 222, 173),
"navy": RGBColor(0, 0, 128),
"navyblue": RGBColor(0, 0, 128),
"oldlace": RGBColor(253, 245, 230),
"olive": RGBColor(128, 128, 0),
"olivedrab": RGBColor(107, 142, 35),
"orange": RGBColor(255, 165, 0),
"orangered": RGBColor(255, 69, 0),
"orchid": RGBColor(218, 112, 214),
"palegoldenrod": RGBColor(238, 232, 170),
"palegreen": RGBColor(152, 251, 152),
"paleturquoise": RGBColor(175, 238, 238),
"palevioletred": RGBColor(219, 112, 147),
"papayawhip": RGBColor(255, 239, 213),
"peachpuff": RGBColor(255, 218, 185),
"peru": RGBColor(205, 133, 63),
"pink": RGBColor(255, 192, 203),
"plum": RGBColor(221, 160, 221),
"powderblue": RGBColor(176, 224, 230),
"purple": RGBColor(128, 0, 128),
"red": RGBColor(255, 0, 0),
"rosybrown": RGBColor(188, 143, 143),
"royalblue": RGBColor(65, 105, 225),
"saddlebrown": RGBColor(139, 69, 19),
"salmon": RGBColor(250, 128, 114),
"sandybrown": RGBColor(244, 164, 96),
"seagreen": RGBColor(46, 139, 87),
"seashell": RGBColor(255, 245, 238),
"sienna": RGBColor(160, 82, 45),
"silver": RGBColor(192, 192, 192),
"skyblue": RGBColor(135, 206, 235),
"slateblue": RGBColor(106, 90, 205),
"slategray": RGBColor(112, 128, 144),
"slategrey": RGBColor(112, 128, 144),
"snow": RGBColor(255, 250, 250),
"springgreen": RGBColor(0, 255, 127),
"steelblue": RGBColor(70, 130, 180),
"tan": RGBColor(210, 180, 140),
"teal": RGBColor(0, 128, 128),
"thistle": RGBColor(216, 191, 216),
"tomato": RGBColor(255, 99, 71),
"turquoise": RGBColor(64, 224, 208),
"violet": RGBColor(238, 130, 238),
"wheat": RGBColor(245, 222, 179),
"white": RGBColor(255, 255, 255),
"whitesmoke": RGBColor(245, 245, 245),
"yellow": RGBColor(255, 255, 0),
"yellowgreen": RGBColor(154, 205, 50),
# And...
"homeassistant": RGBColor(3, 169, 244),
}
@attr.s()
class XYPoint:
"""Represents a CIE 1931 XY coordinate pair."""
x: float = attr.ib() # pylint: disable=invalid-name
y: float = attr.ib() # pylint: disable=invalid-name
@attr.s()
class GamutType:
"""Represents the Gamut of a light."""
# ColorGamut = gamut(xypoint(xR,yR),xypoint(xG,yG),xypoint(xB,yB))
red: XYPoint = attr.ib()
green: XYPoint = attr.ib()
blue: XYPoint = attr.ib()
def color_name_to_rgb(color_name: str) -> RGBColor:
"""Convert color name to RGB hex value."""
# COLORS map has no spaces in it, so make the color_name have no
# spaces in it as well for matching purposes
hex_value = COLORS.get(color_name.replace(" ", "").lower())
if not hex_value:
raise ValueError("Unknown color")
return hex_value
# pylint: disable=invalid-name
def color_RGB_to_xy(
iR: int, iG: int, iB: int, Gamut: GamutType | None = None
) -> tuple[float, float]:
"""Convert from RGB color to XY color."""
return color_RGB_to_xy_brightness(iR, iG, iB, Gamut)[:2]
# Taken from:
# https://github.com/PhilipsHue/PhilipsHueSDK-iOS-OSX/blob/00187a3/ApplicationDesignNotes/RGB%20to%20xy%20Color%20conversion.md
# License: Code is given as is. Use at your own risk and discretion.
def color_RGB_to_xy_brightness(
iR: int, iG: int, iB: int, Gamut: GamutType | None = None
) -> tuple[float, float, int]:
"""Convert from RGB color to XY color."""
if iR + iG + iB == 0:
return 0.0, 0.0, 0
R = iR / 255
B = iB / 255
G = iG / 255
# Gamma correction
R = pow((R + 0.055) / (1.0 + 0.055), 2.4) if (R > 0.04045) else (R / 12.92)
G = pow((G + 0.055) / (1.0 + 0.055), 2.4) if (G > 0.04045) else (G / 12.92)
B = pow((B + 0.055) / (1.0 + 0.055), 2.4) if (B > 0.04045) else (B / 12.92)
# Wide RGB D65 conversion formula
X = R * 0.664511 + G * 0.154324 + B * 0.162028
Y = R * 0.283881 + G * 0.668433 + B * 0.047685
Z = R * 0.000088 + G * 0.072310 + B * 0.986039
# Convert XYZ to xy
x = X / (X + Y + Z)
y = Y / (X + Y + Z)
# Brightness
Y = 1 if Y > 1 else Y
brightness = round(Y * 255)
# Check if the given xy value is within the color-reach of the lamp.
if Gamut:
in_reach = check_point_in_lamps_reach((x, y), Gamut)
if not in_reach:
xy_closest = get_closest_point_to_point((x, y), Gamut)
x = xy_closest[0]
y = xy_closest[1]
return round(x, 3), round(y, 3), brightness
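# Illustrative sketch (not part of the original module); the values below are
# approximate results of the formulas above when no gamut is supplied:
#
#     color_RGB_to_xy_brightness(255, 0, 0)      # ~ (0.701, 0.299, 72)
#     color_RGB_to_xy_brightness(255, 255, 255)  # ~ (0.323, 0.329, 255)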
def color_xy_to_RGB(
vX: float, vY: float, Gamut: GamutType | None = None
) -> tuple[int, int, int]:
"""Convert from XY to a normalized RGB."""
return color_xy_brightness_to_RGB(vX, vY, 255, Gamut)
# Converted to Python from Obj-C, original source from:
# https://github.com/PhilipsHue/PhilipsHueSDK-iOS-OSX/blob/00187a3/ApplicationDesignNotes/RGB%20to%20xy%20Color%20conversion.md
def color_xy_brightness_to_RGB(
vX: float, vY: float, ibrightness: int, Gamut: GamutType | None = None
) -> tuple[int, int, int]:
"""Convert from XYZ to RGB."""
if Gamut and not check_point_in_lamps_reach((vX, vY), Gamut):
xy_closest = get_closest_point_to_point((vX, vY), Gamut)
vX = xy_closest[0]
vY = xy_closest[1]
brightness = ibrightness / 255.0
if brightness == 0.0:
return (0, 0, 0)
Y = brightness
if vY == 0.0:
vY += 0.00000000001
X = (Y / vY) * vX
Z = (Y / vY) * (1 - vX - vY)
# Convert to RGB using Wide RGB D65 conversion.
r = X * 1.656492 - Y * 0.354851 - Z * 0.255038
g = -X * 0.707196 + Y * 1.655397 + Z * 0.036152
b = X * 0.051713 - Y * 0.121364 + Z * 1.011530
# Apply reverse gamma correction.
r, g, b = map(
lambda x: (12.92 * x)
if (x <= 0.0031308)
else ((1.0 + 0.055) * cast(float, pow(x, (1.0 / 2.4))) - 0.055),
[r, g, b],
)
# Bring all negative components to zero.
r, g, b = map(lambda x: max(0, x), [r, g, b])
# If one component is greater than 1, weight components by that value.
max_component = max(r, g, b)
if max_component > 1:
r, g, b = map(lambda x: x / max_component, [r, g, b])
ir, ig, ib = map(lambda x: int(x * 255), [r, g, b])
return (ir, ig, ib)
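# Illustrative note (not in the original source): this is the approximate
# inverse of color_RGB_to_xy_brightness(); round trips are not exact because
# of rounding, gamut clamping and the brightness normalisation, e.g.:
#
#     color_xy_brightness_to_RGB(0.701, 0.299, 255)  # ~ (255, 0, 0)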
def color_hsb_to_RGB(fH: float, fS: float, fB: float) -> tuple[int, int, int]:
"""Convert a hsb into its rgb representation."""
if fS == 0.0:
fV = int(fB * 255)
return fV, fV, fV
r = g = b = 0
h = fH / 60
f = h - float(math.floor(h))
p = fB * (1 - fS)
q = fB * (1 - fS * f)
t = fB * (1 - (fS * (1 - f)))
if int(h) == 0:
r = int(fB * 255)
g = int(t * 255)
b = int(p * 255)
elif int(h) == 1:
r = int(q * 255)
g = int(fB * 255)
b = int(p * 255)
elif int(h) == 2:
r = int(p * 255)
g = int(fB * 255)
b = int(t * 255)
elif int(h) == 3:
r = int(p * 255)
g = int(q * 255)
b = int(fB * 255)
elif int(h) == 4:
r = int(t * 255)
g = int(p * 255)
b = int(fB * 255)
elif int(h) == 5:
r = int(fB * 255)
g = int(p * 255)
b = int(q * 255)
return (r, g, b)
def color_RGB_to_hsv(iR: float, iG: float, iB: float) -> tuple[float, float, float]:
"""Convert an rgb color to its hsv representation.
Hue is scaled 0-360
Sat is scaled 0-100
Val is scaled 0-100
"""
fHSV = colorsys.rgb_to_hsv(iR / 255.0, iG / 255.0, iB / 255.0)
return round(fHSV[0] * 360, 3), round(fHSV[1] * 100, 3), round(fHSV[2] * 100, 3)
def color_RGB_to_hs(iR: float, iG: float, iB: float) -> tuple[float, float]:
"""Convert an rgb color to its hs representation."""
return color_RGB_to_hsv(iR, iG, iB)[:2]
def color_hsv_to_RGB(iH: float, iS: float, iV: float) -> tuple[int, int, int]:
"""Convert an hsv color into its rgb representation.
Hue is scaled 0-360
Sat is scaled 0-100
Val is scaled 0-100
"""
fRGB = colorsys.hsv_to_rgb(iH / 360, iS / 100, iV / 100)
return (int(fRGB[0] * 255), int(fRGB[1] * 255), int(fRGB[2] * 255))
def color_hs_to_RGB(iH: float, iS: float) -> tuple[int, int, int]:
"""Convert an hsv color into its rgb representation."""
return color_hsv_to_RGB(iH, iS, 100)
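# Illustrative usage of the HSV/HS helpers above (not part of the original
# module); the exact values follow from colorsys:
#
#     color_hsv_to_RGB(120, 100, 100)  # -> (0, 255, 0)
#     color_RGB_to_hs(0, 0, 255)       # -> (240.0, 100.0)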
def color_xy_to_hs(
vX: float, vY: float, Gamut: GamutType | None = None
) -> tuple[float, float]:
"""Convert an xy color to its hs representation."""
h, s, _ = color_RGB_to_hsv(*color_xy_to_RGB(vX, vY, Gamut))
return h, s
def color_hs_to_xy(
iH: float, iS: float, Gamut: GamutType | None = None
) -> tuple[float, float]:
"""Convert an hs color to its xy representation."""
return color_RGB_to_xy(*color_hs_to_RGB(iH, iS), Gamut)
def match_max_scale(
input_colors: tuple[int, ...], output_colors: tuple[float, ...]
) -> tuple[int, ...]:
"""Match the maximum value of the output to the input."""
max_in = max(input_colors)
max_out = max(output_colors)
if max_out == 0:
factor = 0.0
else:
factor = max_in / max_out
return tuple(int(round(i * factor)) for i in output_colors)
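# Illustrative example (not in the original source): scaling restores the
# input's peak level after white extraction or addition, e.g.:
#
#     match_max_scale((255, 128, 0), (127.5, 64.0, 0.0))  # -> (255, 128, 0)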
def color_rgb_to_rgbw(r: int, g: int, b: int) -> tuple[int, int, int, int]:
"""Convert an rgb color to an rgbw representation."""
# Calculate the white channel as the minimum of input rgb channels.
# Subtract the white portion from the remaining rgb channels.
w = min(r, g, b)
rgbw = (r - w, g - w, b - w, w)
# Match the output maximum value to the input. This ensures the full
# channel range is used.
return match_max_scale((r, g, b), rgbw) # type: ignore[return-value]
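# Illustrative usage (not part of the original module):
#
#     color_rgb_to_rgbw(255, 255, 255)  # -> (0, 0, 0, 255), pure white channel
#     color_rgb_to_rgbw(255, 0, 0)      # -> (255, 0, 0, 0), no white content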
def color_rgbw_to_rgb(r: int, g: int, b: int, w: int) -> tuple[int, int, int]:
"""Convert an rgbw color to an rgb representation."""
# Add the white channel to the rgb channels.
rgb = (r + w, g + w, b + w)
# Match the output maximum value to the input. This ensures the
# output doesn't overflow.
return match_max_scale((r, g, b, w), rgb) # type: ignore[return-value]
def color_rgb_to_rgbww(
r: int, g: int, b: int, min_mireds: int, max_mireds: int
) -> tuple[int, int, int, int, int]:
"""Convert an rgb color to an rgbww representation."""
# Find the color temperature when both white channels have equal brightness
mired_range = max_mireds - min_mireds
mired_midpoint = min_mireds + mired_range / 2
color_temp_kelvin = color_temperature_mired_to_kelvin(mired_midpoint)
w_r, w_g, w_b = color_temperature_to_rgb(color_temp_kelvin)
# Find the ratio of the midpoint white in the input rgb channels
white_level = min(
r / w_r if w_r else 0, g / w_g if w_g else 0, b / w_b if w_b else 0
)
# Subtract the white portion from the rgb channels.
rgb = (r - w_r * white_level, g - w_g * white_level, b - w_b * white_level)
rgbww = (*rgb, round(white_level * 255), round(white_level * 255))
# Match the output maximum value to the input. This ensures the full
# channel range is used.
return match_max_scale((r, g, b), rgbww) # type: ignore[return-value]
def color_rgbww_to_rgb(
r: int, g: int, b: int, cw: int, ww: int, min_mireds: int, max_mireds: int
) -> tuple[int, int, int]:
"""Convert an rgbww color to an rgb representation."""
# Calculate color temperature of the white channels
mired_range = max_mireds - min_mireds
try:
ct_ratio = ww / (cw + ww)
except ZeroDivisionError:
ct_ratio = 0.5
color_temp_mired = min_mireds + ct_ratio * mired_range
color_temp_kelvin = color_temperature_mired_to_kelvin(color_temp_mired)
w_r, w_g, w_b = color_temperature_to_rgb(color_temp_kelvin)
white_level = max(cw, ww) / 255
# Add the white channels to the rgb channels.
rgb = (r + w_r * white_level, g + w_g * white_level, b + w_b * white_level)
# Match the output maximum value to the input. This ensures the
# output doesn't overflow.
return match_max_scale((r, g, b, cw, ww), rgb) # type: ignore[return-value]
def color_rgb_to_hex(r: int, g: int, b: int) -> str:
"""Return a RGB color from a hex color string."""
return f"{round(r):02x}{round(g):02x}{round(b):02x}"
def rgb_hex_to_rgb_list(hex_string: str) -> list[int]:
"""Return an RGB color value list from a hex color string."""
return [
int(hex_string[i : i + len(hex_string) // 3], 16)
for i in range(0, len(hex_string), len(hex_string) // 3)
]
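# Illustrative round trip (not in the original source):
#
#     color_rgb_to_hex(255, 128, 0)   # -> "ff8000"
#     rgb_hex_to_rgb_list("ff8000")   # -> [255, 128, 0]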
def color_temperature_to_hs(color_temperature_kelvin: float) -> tuple[float, float]:
"""Return an hs color from a color temperature in Kelvin."""
return color_RGB_to_hs(*color_temperature_to_rgb(color_temperature_kelvin))
def color_temperature_to_rgb(
color_temperature_kelvin: float,
) -> tuple[float, float, float]:
"""
Return an RGB color from a color temperature in Kelvin.
This is a rough approximation based on the formula provided by T. Helland
http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/
"""
# range check
if color_temperature_kelvin < 1000:
color_temperature_kelvin = 1000
elif color_temperature_kelvin > 40000:
color_temperature_kelvin = 40000
tmp_internal = color_temperature_kelvin / 100.0
red = _get_red(tmp_internal)
green = _get_green(tmp_internal)
blue = _get_blue(tmp_internal)
return red, green, blue
def color_temperature_to_rgbww(
temperature: int, brightness: int, min_mireds: int, max_mireds: int
) -> tuple[int, int, int, int, int]:
"""Convert color temperature to rgbcw."""
mired_range = max_mireds - min_mireds
cold = ((max_mireds - temperature) / mired_range) * brightness
warm = brightness - cold
return (0, 0, 0, round(cold), round(warm))
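# Illustrative sketch (not part of the original module); at the warmest
# setting (max mireds) only the warm white channel is lit, and vice versa:
#
#     color_temperature_to_rgbww(500, 255, 153, 500)  # -> (0, 0, 0, 0, 255)
#     color_temperature_to_rgbww(153, 255, 153, 500)  # -> (0, 0, 0, 255, 0)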
def _clamp(color_component: float, minimum: float = 0, maximum: float = 255) -> float:
"""
Clamp the given color component value between the given min and max values.
The range defined by the minimum and maximum values is inclusive, i.e. given a
color_component of 0 and a minimum of 10, the returned value is 10.
"""
color_component_out = max(color_component, minimum)
return min(color_component_out, maximum)
def _get_red(temperature: float) -> float:
"""Get the red component of the temperature in RGB space."""
if temperature <= 66:
return 255
tmp_red = 329.698727446 * math.pow(temperature - 60, -0.1332047592)
return _clamp(tmp_red)
def _get_green(temperature: float) -> float:
"""Get the green component of the given color temp in RGB space."""
if temperature <= 66:
green = 99.4708025861 * math.log(temperature) - 161.1195681661
else:
green = 288.1221695283 * math.pow(temperature - 60, -0.0755148492)
return _clamp(green)
def _get_blue(temperature: float) -> float:
"""Get the blue component of the given color temperature in RGB space."""
if temperature >= 66:
return 255
if temperature <= 19:
return 0
blue = 138.5177312231 * math.log(temperature - 10) - 305.0447927307
return _clamp(blue)
def color_temperature_mired_to_kelvin(mired_temperature: float) -> int:
"""Convert absolute mired shift to degrees kelvin."""
return math.floor(1000000 / mired_temperature)
def color_temperature_kelvin_to_mired(kelvin_temperature: float) -> int:
"""Convert degrees kelvin to mired shift."""
return math.floor(1000000 / kelvin_temperature)
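# Illustrative note (not in the original source): mireds are one million
# divided by kelvin, so the two helpers are near-inverses up to flooring:
#
#     color_temperature_kelvin_to_mired(6500)  # -> 153
#     color_temperature_mired_to_kelvin(153)   # -> 6535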
# The following 5 functions are adapted from rgbxy provided by <NAME>
# License: The MIT License (MIT), 2014.
# https://github.com/benknight/hue-python-rgb-converter
def cross_product(p1: XYPoint, p2: XYPoint) -> float:
"""Calculate the cross product of two XYPoints."""
return float(p1.x * p2.y - p1.y * p2.x)
def get_distance_between_two_points(one: XYPoint, two: XYPoint) -> float:
"""Calculate the distance between two XYPoints."""
dx = one.x - two.x
dy = one.y - two.y
return math.sqrt(dx * dx + dy * dy)
def get_closest_point_to_line(A: XYPoint, B: XYPoint, P: XYPoint) -> XYPoint:
"""
Find the closest point from P to a line defined by A and B.
This point will be reproducible by the lamp
as it is on the edge of the gamut.
"""
AP = XYPoint(P.x - A.x, P.y - A.y)
AB = XYPoint(B.x - A.x, B.y - A.y)
ab2 = AB.x * AB.x + AB.y * AB.y
ap_ab = AP.x * AB.x + AP.y * AB.y
t = ap_ab / ab2
if t < 0.0:
t = 0.0
elif t > 1.0:
t = 1.0
return XYPoint(A.x + AB.x * t, A.y + AB.y * t)
def get_closest_point_to_point(
xy_tuple: tuple[float, float], Gamut: GamutType
) -> tuple[float, float]:
"""
Get the closest matching color within the gamut of the light.
Should only be used if the supplied color is outside of the color gamut.
"""
xy_point = XYPoint(xy_tuple[0], xy_tuple[1])
# find the closest point on each line in the CIE 1931 'triangle'.
pAB = get_closest_point_to_line(Gamut.red, Gamut.green, xy_point)
pAC = get_closest_point_to_line(Gamut.blue, Gamut.red, xy_point)
pBC = get_closest_point_to_line(Gamut.green, Gamut.blue, xy_point)
# Get the distances per point and see which point is closer to our Point.
dAB = get_distance_between_two_points(xy_point, pAB)
dAC = get_distance_between_two_points(xy_point, pAC)
dBC = get_distance_between_two_points(xy_point, pBC)
lowest = dAB
closest_point = pAB
if dAC < lowest:
lowest = dAC
closest_point = pAC
if dBC < lowest:
lowest = dBC
closest_point = pBC
# Change the xy value to a value which is within the reach of the lamp.
cx = closest_point.x
cy = closest_point.y
return (cx, cy)
def check_point_in_lamps_reach(p: tuple[float, float], Gamut: GamutType) -> bool:
"""Check if the provided XYPoint can be recreated by a Hue lamp."""
v1 = XYPoint(Gamut.green.x - Gamut.red.x, Gamut.green.y - Gamut.red.y)
v2 = XYPoint(Gamut.blue.x - Gamut.red.x, Gamut.blue.y - Gamut.red.y)
q = XYPoint(p[0] - Gamut.red.x, p[1] - Gamut.red.y)
s = cross_product(q, v2) / cross_product(v1, v2)
t = cross_product(v1, q) / cross_product(v1, v2)
return (s >= 0.0) and (t >= 0.0) and (s + t <= 1.0)
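# Descriptive note (added, not in the original source): s and t are the
# barycentric coordinates of p in the basis (v1, v2) anchored at the red
# corner; the point lies inside the gamut triangle exactly when both are
# non-negative and their sum does not exceed 1.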
def check_valid_gamut(Gamut: GamutType) -> bool:
"""Check if the supplied gamut is valid."""
# Check if the three points of the supplied gamut are not on the same line.
v1 = XYPoint(Gamut.green.x - Gamut.red.x, Gamut.green.y - Gamut.red.y)
v2 = XYPoint(Gamut.blue.x - Gamut.red.x, Gamut.blue.y - Gamut.red.y)
not_on_line = cross_product(v1, v2) > 0.0001
# Check if all six coordinates of the gamut lie between 0 and 1.
red_valid = (
Gamut.red.x >= 0 and Gamut.red.x <= 1 and Gamut.red.y >= 0 and Gamut.red.y <= 1
)
green_valid = (
Gamut.green.x >= 0
and Gamut.green.x <= 1
and Gamut.green.y >= 0
and Gamut.green.y <= 1
)
blue_valid = (
Gamut.blue.x >= 0
and Gamut.blue.x <= 1
and Gamut.blue.y >= 0
and Gamut.blue.y <= 1
)
return not_on_line and red_valid and green_valid and blue_valid
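# Illustrative usage sketch (not part of the original module); the gamut
# coordinates below are approximate Philips Hue "Gamut C" values and are
# assumptions, not taken from this file:
#
#     GAMUT_C = GamutType(
#         XYPoint(0.6915, 0.3083), XYPoint(0.17, 0.7), XYPoint(0.1532, 0.0475)
#     )
#     check_valid_gamut(GAMUT_C)           # -> True
#     color_RGB_to_xy(0, 255, 0, GAMUT_C)  # saturated green is clamped to the gamut edge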
| null | null | null | null | null | null | null | null | null |
[] | 4,451 |
tests/components/aladdin_connect/test_model.py
|
liangleslie/core
| 30,023 |
11331742
|
"""Test the Aladdin Connect model class."""
from homeassistant.components.aladdin_connect.model import DoorDevice
from homeassistant.core import HomeAssistant
async def test_model(hass: HomeAssistant) -> None:
"""Test model for Aladdin Connect Model."""
test_values = {
"device_id": "1",
"door_number": "2",
"name": "<NAME>",
"status": "good",
}
result2 = DoorDevice(test_values)
assert result2["device_id"] == "1"
assert result2["door_number"] == "2"
assert result2["name"] == "<NAME>"
assert result2["status"] == "good"
| null | null | null | null | null | null | null | null | null |
[] | 7,062 |
examples/old/map_reduce.py
|
nicolasiltis/prefect
| 8,633 |
546199
|
<reponame>nicolasiltis/prefect<gh_stars>1000+
from prefect import Flow, task
# ------------------------------------
# define some tasks
@task
def numbers_task():
return [1, 2, 3]
@task
def map_task(x):
return x + 1
@task
def reduce_task(x):
return sum(x)
# ------------------------------------
# build a flow
with Flow("Map / Reduce 🤓") as flow:
numbers = numbers_task()
first_map = map_task.map(numbers)
second_map = map_task.map(first_map)
reduction = reduce_task(second_map)
# ------------------------------------
# run the flow
state = flow.run()
assert state.result[reduction].result == 12
| null | null | null | null | null | null | null | null | null |
[] | 10,609 |
diagrams/openstack/workloadprovisioning.py
|
abdulkaderjeelani/diagrams
| 17,037 |
6476450
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _OpenStack
class _Workloadprovisioning(_OpenStack):
_type = "workloadprovisioning"
_icon_dir = "resources/openstack/workloadprovisioning"
class Magnum(_Workloadprovisioning):
_icon = "magnum.png"
class Sahara(_Workloadprovisioning):
_icon = "sahara.png"
class Trove(_Workloadprovisioning):
_icon = "trove.png"
# Aliases
|
0
|
0
|
0.0
|
0
|
0
|
0
|
1.0
|
0
|
0
|
[] | 4,173 |
samples/python/tutorial_code/imgProc/match_template/match_template.py
|
thisisgopalmandal/opencv
| 56,632 |
4924342
|
<gh_stars>1000+
from __future__ import print_function
import sys
import cv2 as cv
## [global_variables]
use_mask = False
img = None
templ = None
mask = None
image_window = "Source Image"
result_window = "Result window"
match_method = 0
max_Trackbar = 5
## [global_variables]
def main(argv):
if (len(sys.argv) < 3):
print('Not enough parameters')
print('Usage:\nmatch_template_demo.py <image_name> <template_name> [<mask_name>]')
return -1
## [load_image]
global img
global templ
img = cv.imread(sys.argv[1], cv.IMREAD_COLOR)
templ = cv.imread(sys.argv[2], cv.IMREAD_COLOR)
if (len(sys.argv) > 3):
global use_mask
use_mask = True
global mask
mask = cv.imread( sys.argv[3], cv.IMREAD_COLOR )
if ((img is None) or (templ is None) or (use_mask and (mask is None))):
print('Can\'t read one of the images')
return -1
## [load_image]
## [create_windows]
cv.namedWindow( image_window, cv.WINDOW_AUTOSIZE )
cv.namedWindow( result_window, cv.WINDOW_AUTOSIZE )
## [create_windows]
## [create_trackbar]
trackbar_label = 'Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED'
cv.createTrackbar( trackbar_label, image_window, match_method, max_Trackbar, MatchingMethod )
## [create_trackbar]
MatchingMethod(match_method)
## [wait_key]
cv.waitKey(0)
return 0
## [wait_key]
def MatchingMethod(param):
global match_method
match_method = param
## [copy_source]
img_display = img.copy()
## [copy_source]
## [match_template]
method_accepts_mask = (cv.TM_SQDIFF == match_method or match_method == cv.TM_CCORR_NORMED)
if (use_mask and method_accepts_mask):
result = cv.matchTemplate(img, templ, match_method, None, mask)
else:
result = cv.matchTemplate(img, templ, match_method)
## [match_template]
## [normalize]
cv.normalize( result, result, 0, 1, cv.NORM_MINMAX, -1 )
## [normalize]
## [best_match]
_minVal, _maxVal, minLoc, maxLoc = cv.minMaxLoc(result, None)
## [best_match]
## [match_loc]
if (match_method == cv.TM_SQDIFF or match_method == cv.TM_SQDIFF_NORMED):
matchLoc = minLoc
else:
matchLoc = maxLoc
## [match_loc]
## [imshow]
cv.rectangle(img_display, matchLoc, (matchLoc[0] + templ.shape[1], matchLoc[1] + templ.shape[0]), (0,0,0), 2, 8, 0 )
cv.rectangle(result, matchLoc, (matchLoc[0] + templ.shape[1], matchLoc[1] + templ.shape[0]), (0,0,0), 2, 8, 0 )
cv.imshow(image_window, img_display)
cv.imshow(result_window, result)
## [imshow]
pass
if __name__ == "__main__":
main(sys.argv[1:])
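# Illustrative invocation (not part of the original sample); the file names
# are placeholders:
#
#     python match_template.py scene.jpg template.jpg
#     python match_template.py scene.jpg template.jpg mask.png   # optional mask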
| null | null | null | null | null | null | null | null | null |
[] | 3,526 |
examples/pxScene2d/external/libnode-v0.12.7/deps/v8/tools/ll_prof.py
|
madanagopaltcomcast/pxCore
| 2,494 |
74950
|
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import bisect
import collections
import ctypes
import disasm
import mmap
import optparse
import os
import re
import subprocess
import sys
import time
USAGE="""usage: %prog [OPTION]...
Analyses V8 and perf logs to produce profiles.
Perf logs can be collected using a command like:
$ perf record -R -e cycles -c 10000 -f -i ./d8 bench.js --ll-prof
# -R: collect all data
# -e cycles: use cpu-cycles event (run "perf list" for details)
# -c 10000: write a sample after each 10000 events
# -f: force output file overwrite
# -i: limit profiling to our process and the kernel
# --ll-prof shell flag enables the right V8 logs
This will produce a binary trace file (perf.data) that %prog can analyse.
IMPORTANT:
The kernel has an internal maximum for events per second, it is 100K by
default. That's not enough for "-c 10000". Set it to some higher value:
$ echo 10000000 | sudo tee /proc/sys/kernel/perf_event_max_sample_rate
You can also make the warning about kernel address maps go away:
$ echo 0 | sudo tee /proc/sys/kernel/kptr_restrict
We have a convenience script that handles all of the above for you:
$ tools/run-llprof.sh ./d8 bench.js
Examples:
# Print flat profile with annotated disassembly for the 10 top
# symbols. Use default log names and include the snapshot log.
$ %prog --snapshot --disasm-top=10
# Print flat profile with annotated disassembly for all used symbols.
# Use default log names and include kernel symbols into analysis.
$ %prog --disasm-all --kernel
# Print flat profile. Use custom log names.
$ %prog --log=foo.log --snapshot-log=snap-foo.log --trace=foo.data --snapshot
"""
JS_ORIGIN = "js"
JS_SNAPSHOT_ORIGIN = "js-snapshot"
class Code(object):
"""Code object."""
_id = 0
UNKNOWN = 0
V8INTERNAL = 1
FULL_CODEGEN = 2
OPTIMIZED = 3
def __init__(self, name, start_address, end_address, origin, origin_offset):
self.id = Code._id
Code._id += 1
self.name = name
self.other_names = None
self.start_address = start_address
self.end_address = end_address
self.origin = origin
self.origin_offset = origin_offset
self.self_ticks = 0
self.self_ticks_map = None
self.callee_ticks = None
if name.startswith("LazyCompile:*"):
self.codetype = Code.OPTIMIZED
elif name.startswith("LazyCompile:"):
self.codetype = Code.FULL_CODEGEN
elif name.startswith("v8::internal::"):
self.codetype = Code.V8INTERNAL
else:
self.codetype = Code.UNKNOWN
def AddName(self, name):
assert self.name != name
if self.other_names is None:
self.other_names = [name]
return
if not name in self.other_names:
self.other_names.append(name)
def FullName(self):
if self.other_names is None:
return self.name
self.other_names.sort()
return "%s (aka %s)" % (self.name, ", ".join(self.other_names))
def IsUsed(self):
return self.self_ticks > 0 or self.callee_ticks is not None
def Tick(self, pc):
self.self_ticks += 1
if self.self_ticks_map is None:
self.self_ticks_map = collections.defaultdict(lambda: 0)
offset = pc - self.start_address
self.self_ticks_map[offset] += 1
def CalleeTick(self, callee):
if self.callee_ticks is None:
self.callee_ticks = collections.defaultdict(lambda: 0)
self.callee_ticks[callee] += 1
def PrintAnnotated(self, arch, options):
if self.self_ticks_map is None:
ticks_map = []
else:
ticks_map = self.self_ticks_map.items()
# Convert the ticks map to offsets and counts arrays so that later
# we can do binary search in the offsets array.
ticks_map.sort(key=lambda t: t[0])
ticks_offsets = [t[0] for t in ticks_map]
ticks_counts = [t[1] for t in ticks_map]
# Get a list of disassembled lines and their addresses.
lines = self._GetDisasmLines(arch, options)
if len(lines) == 0:
return
# Print annotated lines.
address = lines[0][0]
total_count = 0
for i in xrange(len(lines)):
start_offset = lines[i][0] - address
if i == len(lines) - 1:
end_offset = self.end_address - self.start_address
else:
end_offset = lines[i + 1][0] - address
# Ticks (reported pc values) are not always precise, i.e. not
# necessarily point at instruction starts. So we have to search
# for ticks that touch the current instruction line.
j = bisect.bisect_left(ticks_offsets, end_offset)
count = 0
for offset, cnt in reversed(zip(ticks_offsets[:j], ticks_counts[:j])):
if offset < start_offset:
break
count += cnt
total_count += count
count = 100.0 * count / self.self_ticks
if count >= 0.01:
print "%15.2f %x: %s" % (count, lines[i][0], lines[i][1])
else:
print "%s %x: %s" % (" " * 15, lines[i][0], lines[i][1])
print
assert total_count == self.self_ticks, \
"Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self)
def __str__(self):
return "%s [0x%x, 0x%x) size: %d origin: %s" % (
self.name,
self.start_address,
self.end_address,
self.end_address - self.start_address,
self.origin)
def _GetDisasmLines(self, arch, options):
if self.origin == JS_ORIGIN or self.origin == JS_SNAPSHOT_ORIGIN:
inplace = False
filename = options.log + ".ll"
else:
inplace = True
filename = self.origin
return disasm.GetDisasmLines(filename,
self.origin_offset,
self.end_address - self.start_address,
arch,
inplace)
class CodePage(object):
"""Group of adjacent code objects."""
SHIFT = 20 # 1M pages
SIZE = (1 << SHIFT)
MASK = ~(SIZE - 1)
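# Illustrative note (added, not in the original tool): with SHIFT = 20 a page
# is 1 MiB, so PageId(0x12345678) is 0x123 and PageAddress(0x12345678) is
# 0x12300000.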
@staticmethod
def PageAddress(address):
return address & CodePage.MASK
@staticmethod
def PageId(address):
return address >> CodePage.SHIFT
@staticmethod
def PageAddressFromId(id):
return id << CodePage.SHIFT
def __init__(self, address):
self.address = address
self.code_objects = []
def Add(self, code):
self.code_objects.append(code)
def Remove(self, code):
self.code_objects.remove(code)
def Find(self, pc):
code_objects = self.code_objects
for i, code in enumerate(code_objects):
if code.start_address <= pc < code.end_address:
code_objects[0], code_objects[i] = code, code_objects[0]
return code
return None
def __iter__(self):
return self.code_objects.__iter__()
class CodeMap(object):
"""Code object map."""
def __init__(self):
self.pages = {}
self.min_address = 1 << 64
self.max_address = -1
def Add(self, code, max_pages=-1):
page_id = CodePage.PageId(code.start_address)
limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
pages = 0
while page_id < limit_id:
if max_pages >= 0 and pages > max_pages:
print >>sys.stderr, \
"Warning: page limit (%d) reached for %s [%s]" % (
max_pages, code.name, code.origin)
break
if page_id in self.pages:
page = self.pages[page_id]
else:
page = CodePage(CodePage.PageAddressFromId(page_id))
self.pages[page_id] = page
page.Add(code)
page_id += 1
pages += 1
self.min_address = min(self.min_address, code.start_address)
self.max_address = max(self.max_address, code.end_address)
def Remove(self, code):
page_id = CodePage.PageId(code.start_address)
limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
removed = False
while page_id < limit_id:
if page_id not in self.pages:
page_id += 1
continue
page = self.pages[page_id]
page.Remove(code)
removed = True
page_id += 1
return removed
def AllCode(self):
for page in self.pages.itervalues():
for code in page:
if CodePage.PageAddress(code.start_address) == page.address:
yield code
def UsedCode(self):
for code in self.AllCode():
if code.IsUsed():
yield code
def Print(self):
for code in self.AllCode():
print code
def Find(self, pc):
if pc < self.min_address or pc >= self.max_address:
return None
page_id = CodePage.PageId(pc)
if page_id not in self.pages:
return None
return self.pages[page_id].Find(pc)
class CodeInfo(object):
"""Generic info about generated code objects."""
def __init__(self, arch, header_size):
self.arch = arch
self.header_size = header_size
class SnapshotLogReader(object):
"""V8 snapshot log reader."""
_SNAPSHOT_CODE_NAME_RE = re.compile(
r"snapshot-code-name,(\d+),\"(.*)\"")
def __init__(self, log_name):
self.log_name = log_name
def ReadNameMap(self):
log = open(self.log_name, "r")
try:
snapshot_pos_to_name = {}
for line in log:
match = SnapshotLogReader._SNAPSHOT_CODE_NAME_RE.match(line)
if match:
pos = int(match.group(1))
name = match.group(2)
snapshot_pos_to_name[pos] = name
finally:
log.close()
return snapshot_pos_to_name
class LogReader(object):
"""V8 low-level (binary) log reader."""
_ARCH_TO_POINTER_TYPE_MAP = {
"ia32": ctypes.c_uint32,
"arm": ctypes.c_uint32,
"mips": ctypes.c_uint32,
"x64": ctypes.c_uint64,
"arm64": ctypes.c_uint64
}
_CODE_CREATE_TAG = "C"
_CODE_MOVE_TAG = "M"
_CODE_DELETE_TAG = "D"
_SNAPSHOT_POSITION_TAG = "P"
_CODE_MOVING_GC_TAG = "G"
def __init__(self, log_name, code_map, snapshot_pos_to_name):
self.log_file = open(log_name, "r")
self.log = mmap.mmap(self.log_file.fileno(), 0, mmap.MAP_PRIVATE)
self.log_pos = 0
self.code_map = code_map
self.snapshot_pos_to_name = snapshot_pos_to_name
self.address_to_snapshot_name = {}
self.arch = self.log[:self.log.find("\0")]
self.log_pos += len(self.arch) + 1
assert self.arch in LogReader._ARCH_TO_POINTER_TYPE_MAP, \
"Unsupported architecture %s" % self.arch
pointer_type = LogReader._ARCH_TO_POINTER_TYPE_MAP[self.arch]
self.code_create_struct = LogReader._DefineStruct([
("name_size", ctypes.c_int32),
("code_address", pointer_type),
("code_size", ctypes.c_int32)])
self.code_move_struct = LogReader._DefineStruct([
("from_address", pointer_type),
("to_address", pointer_type)])
self.code_delete_struct = LogReader._DefineStruct([
("address", pointer_type)])
self.snapshot_position_struct = LogReader._DefineStruct([
("address", pointer_type),
("position", ctypes.c_int32)])
def ReadUpToGC(self):
while self.log_pos < self.log.size():
tag = self.log[self.log_pos]
self.log_pos += 1
if tag == LogReader._CODE_MOVING_GC_TAG:
self.address_to_snapshot_name.clear()
return
if tag == LogReader._CODE_CREATE_TAG:
event = self.code_create_struct.from_buffer(self.log, self.log_pos)
self.log_pos += ctypes.sizeof(event)
start_address = event.code_address
end_address = start_address + event.code_size
if start_address in self.address_to_snapshot_name:
name = self.address_to_snapshot_name[start_address]
origin = JS_SNAPSHOT_ORIGIN
else:
name = self.log[self.log_pos:self.log_pos + event.name_size]
origin = JS_ORIGIN
self.log_pos += event.name_size
origin_offset = self.log_pos
self.log_pos += event.code_size
code = Code(name, start_address, end_address, origin, origin_offset)
conficting_code = self.code_map.Find(start_address)
if conficting_code:
if not (conficting_code.start_address == code.start_address and
conficting_code.end_address == code.end_address):
self.code_map.Remove(conficting_code)
else:
LogReader._HandleCodeConflict(conficting_code, code)
# TODO(vitalyr): this warning is too noisy because of our
# attempts to reconstruct code log from the snapshot.
# print >>sys.stderr, \
# "Warning: Skipping duplicate code log entry %s" % code
continue
self.code_map.Add(code)
continue
if tag == LogReader._CODE_MOVE_TAG:
event = self.code_move_struct.from_buffer(self.log, self.log_pos)
self.log_pos += ctypes.sizeof(event)
old_start_address = event.from_address
new_start_address = event.to_address
if old_start_address == new_start_address:
# Skip useless code move entries.
continue
code = self.code_map.Find(old_start_address)
if not code:
print >>sys.stderr, "Warning: Not found %x" % old_start_address
continue
assert code.start_address == old_start_address, \
"Inexact move address %x for %s" % (old_start_address, code)
self.code_map.Remove(code)
size = code.end_address - code.start_address
code.start_address = new_start_address
code.end_address = new_start_address + size
self.code_map.Add(code)
continue
if tag == LogReader._CODE_DELETE_TAG:
event = self.code_delete_struct.from_buffer(self.log, self.log_pos)
self.log_pos += ctypes.sizeof(event)
old_start_address = event.address
code = self.code_map.Find(old_start_address)
if not code:
print >>sys.stderr, "Warning: Not found %x" % old_start_address
continue
assert code.start_address == old_start_address, \
"Inexact delete address %x for %s" % (old_start_address, code)
self.code_map.Remove(code)
continue
if tag == LogReader._SNAPSHOT_POSITION_TAG:
event = self.snapshot_position_struct.from_buffer(self.log,
self.log_pos)
self.log_pos += ctypes.sizeof(event)
start_address = event.address
snapshot_pos = event.position
if snapshot_pos in self.snapshot_pos_to_name:
self.address_to_snapshot_name[start_address] = \
self.snapshot_pos_to_name[snapshot_pos]
continue
assert False, "Unknown tag %s" % tag
def Dispose(self):
self.log.close()
self.log_file.close()
@staticmethod
def _DefineStruct(fields):
class Struct(ctypes.Structure):
_fields_ = fields
return Struct
@staticmethod
def _HandleCodeConflict(old_code, new_code):
assert (old_code.start_address == new_code.start_address and
old_code.end_address == new_code.end_address), \
"Conficting code log entries %s and %s" % (old_code, new_code)
if old_code.name == new_code.name:
return
# Code object may be shared by a few functions. Collect the full
# set of names.
old_code.AddName(new_code.name)
class Descriptor(object):
"""Descriptor of a structure in the binary trace log."""
CTYPE_MAP = {
"u16": ctypes.c_uint16,
"u32": ctypes.c_uint32,
"u64": ctypes.c_uint64
}
def __init__(self, fields):
class TraceItem(ctypes.Structure):
_fields_ = Descriptor.CtypesFields(fields)
def __str__(self):
return ", ".join("%s: %s" % (field, self.__getattribute__(field))
for field, _ in TraceItem._fields_)
self.ctype = TraceItem
def Read(self, trace, offset):
return self.ctype.from_buffer(trace, offset)
@staticmethod
def CtypesFields(fields):
return [(field, Descriptor.CTYPE_MAP[format]) for (field, format) in fields]
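# Descriptive note (added, not in the original tool): a Descriptor such as
# Descriptor([("pid", "u32"), ("time", "u64")]) builds a ctypes Structure whose
# from_buffer() overlays those fields directly onto the mmapped trace, which is
# how the perf headers below are decoded.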
# Please see http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=tree;f=tools/perf
# for the gory details.
# Reference: struct perf_file_header in kernel/tools/perf/util/header.h
TRACE_HEADER_DESC = Descriptor([
("magic", "u64"),
("size", "u64"),
("attr_size", "u64"),
("attrs_offset", "u64"),
("attrs_size", "u64"),
("data_offset", "u64"),
("data_size", "u64"),
("event_types_offset", "u64"),
("event_types_size", "u64")
])
# Reference: /usr/include/linux/perf_event.h
PERF_EVENT_ATTR_DESC = Descriptor([
("type", "u32"),
("size", "u32"),
("config", "u64"),
("sample_period_or_freq", "u64"),
("sample_type", "u64"),
("read_format", "u64"),
("flags", "u64"),
("wakeup_events_or_watermark", "u32"),
("bp_type", "u32"),
("bp_addr", "u64"),
("bp_len", "u64")
])
# Reference: /usr/include/linux/perf_event.h
PERF_EVENT_HEADER_DESC = Descriptor([
("type", "u32"),
("misc", "u16"),
("size", "u16")
])
# Reference: kernel/events/core.c
PERF_MMAP_EVENT_BODY_DESC = Descriptor([
("pid", "u32"),
("tid", "u32"),
("addr", "u64"),
("len", "u64"),
("pgoff", "u64")
])
# perf_event_attr.sample_type bits control the set of
# perf_sample_event fields.
PERF_SAMPLE_IP = 1 << 0
PERF_SAMPLE_TID = 1 << 1
PERF_SAMPLE_TIME = 1 << 2
PERF_SAMPLE_ADDR = 1 << 3
PERF_SAMPLE_READ = 1 << 4
PERF_SAMPLE_CALLCHAIN = 1 << 5
PERF_SAMPLE_ID = 1 << 6
PERF_SAMPLE_CPU = 1 << 7
PERF_SAMPLE_PERIOD = 1 << 8
PERF_SAMPLE_STREAM_ID = 1 << 9
PERF_SAMPLE_RAW = 1 << 10
# Reference: /usr/include/perf_event.h, the comment for PERF_RECORD_SAMPLE.
PERF_SAMPLE_EVENT_BODY_FIELDS = [
("ip", "u64", PERF_SAMPLE_IP),
("pid", "u32", PERF_SAMPLE_TID),
("tid", "u32", PERF_SAMPLE_TID),
("time", "u64", PERF_SAMPLE_TIME),
("addr", "u64", PERF_SAMPLE_ADDR),
("id", "u64", PERF_SAMPLE_ID),
("stream_id", "u64", PERF_SAMPLE_STREAM_ID),
("cpu", "u32", PERF_SAMPLE_CPU),
("res", "u32", PERF_SAMPLE_CPU),
("period", "u64", PERF_SAMPLE_PERIOD),
# Don't want to handle read format that comes after the period and
# before the callchain and has variable size.
("nr", "u64", PERF_SAMPLE_CALLCHAIN)
# Raw data follows the callchain and is ignored.
]
PERF_SAMPLE_EVENT_IP_FORMAT = "u64"
PERF_RECORD_MMAP = 1
PERF_RECORD_SAMPLE = 9
class TraceReader(object):
"""Perf (linux-2.6/tools/perf) trace file reader."""
_TRACE_HEADER_MAGIC = 4993446653023372624
def __init__(self, trace_name):
self.trace_file = open(trace_name, "r")
self.trace = mmap.mmap(self.trace_file.fileno(), 0, mmap.MAP_PRIVATE)
self.trace_header = TRACE_HEADER_DESC.Read(self.trace, 0)
if self.trace_header.magic != TraceReader._TRACE_HEADER_MAGIC:
print >>sys.stderr, "Warning: unsupported trace header magic"
self.offset = self.trace_header.data_offset
self.limit = self.trace_header.data_offset + self.trace_header.data_size
assert self.limit <= self.trace.size(), \
"Trace data limit exceeds trace file size"
self.header_size = ctypes.sizeof(PERF_EVENT_HEADER_DESC.ctype)
assert self.trace_header.attrs_size != 0, \
"No perf event attributes found in the trace"
perf_event_attr = PERF_EVENT_ATTR_DESC.Read(self.trace,
self.trace_header.attrs_offset)
self.sample_event_body_desc = self._SampleEventBodyDesc(
perf_event_attr.sample_type)
self.callchain_supported = \
(perf_event_attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0
if self.callchain_supported:
self.ip_struct = Descriptor.CTYPE_MAP[PERF_SAMPLE_EVENT_IP_FORMAT]
self.ip_size = ctypes.sizeof(self.ip_struct)
def ReadEventHeader(self):
if self.offset >= self.limit:
return None, 0
offset = self.offset
header = PERF_EVENT_HEADER_DESC.Read(self.trace, self.offset)
self.offset += header.size
return header, offset
def ReadMmap(self, header, offset):
mmap_info = PERF_MMAP_EVENT_BODY_DESC.Read(self.trace,
offset + self.header_size)
# Read null-terminated filename.
filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
offset + header.size]
mmap_info.filename = HOST_ROOT + filename[:filename.find(chr(0))]
return mmap_info
def ReadSample(self, header, offset):
sample = self.sample_event_body_desc.Read(self.trace,
offset + self.header_size)
if not self.callchain_supported:
return sample
sample.ips = []
offset += self.header_size + ctypes.sizeof(sample)
for _ in xrange(sample.nr):
sample.ips.append(
self.ip_struct.from_buffer(self.trace, offset).value)
offset += self.ip_size
return sample
def Dispose(self):
self.trace.close()
self.trace_file.close()
def _SampleEventBodyDesc(self, sample_type):
assert (sample_type & PERF_SAMPLE_READ) == 0, \
"Can't hande read format in samples"
fields = [(field, format)
for (field, format, bit) in PERF_SAMPLE_EVENT_BODY_FIELDS
if (bit & sample_type) != 0]
return Descriptor(fields)
OBJDUMP_SECTION_HEADER_RE = re.compile(
r"^\s*\d+\s(\.\S+)\s+[a-f0-9]")
OBJDUMP_SYMBOL_LINE_RE = re.compile(
r"^([a-f0-9]+)\s(.{7})\s(\S+)\s+([a-f0-9]+)\s+(?:\.hidden\s+)?(.*)$")
OBJDUMP_DYNAMIC_SYMBOLS_START_RE = re.compile(
r"^DYNAMIC SYMBOL TABLE")
OBJDUMP_SKIP_RE = re.compile(
r"^.*ld\.so\.cache$")
KERNEL_ALLSYMS_FILE = "/proc/kallsyms"
PERF_KERNEL_ALLSYMS_RE = re.compile(
r".*kallsyms.*")
KERNEL_ALLSYMS_LINE_RE = re.compile(
r"^([a-f0-9]+)\s(?:t|T)\s(\S+)$")
class LibraryRepo(object):
def __init__(self):
self.infos = []
self.names = set()
self.ticks = {}
def Load(self, mmap_info, code_map, options):
# Skip kernel mmaps when requested using the fact that their tid
# is 0.
if mmap_info.tid == 0 and not options.kernel:
return True
if OBJDUMP_SKIP_RE.match(mmap_info.filename):
return True
if PERF_KERNEL_ALLSYMS_RE.match(mmap_info.filename):
return self._LoadKernelSymbols(code_map)
self.infos.append(mmap_info)
mmap_info.ticks = 0
mmap_info.unique_name = self._UniqueMmapName(mmap_info)
if not os.path.exists(mmap_info.filename):
return True
# Request section headers (-h), symbols (-t), and dynamic symbols
# (-T) from objdump.
# Unfortunately, section headers span two lines, so we have to
# keep the just seen section name (from the first line in each
# section header) in the after_section variable.
if mmap_info.filename.endswith(".ko"):
dynamic_symbols = ""
else:
dynamic_symbols = "-T"
process = subprocess.Popen(
"%s -h -t %s -C %s" % (OBJDUMP_BIN, dynamic_symbols, mmap_info.filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pipe = process.stdout
after_section = None
code_sections = set()
reloc_sections = set()
dynamic = False
try:
for line in pipe:
if after_section:
if line.find("CODE") != -1:
code_sections.add(after_section)
if line.find("RELOC") != -1:
reloc_sections.add(after_section)
after_section = None
continue
match = OBJDUMP_SECTION_HEADER_RE.match(line)
if match:
after_section = match.group(1)
continue
if OBJDUMP_DYNAMIC_SYMBOLS_START_RE.match(line):
dynamic = True
continue
match = OBJDUMP_SYMBOL_LINE_RE.match(line)
if match:
start_address = int(match.group(1), 16)
origin_offset = start_address
flags = match.group(2)
section = match.group(3)
if section in code_sections:
if dynamic or section in reloc_sections:
start_address += mmap_info.addr
size = int(match.group(4), 16)
name = match.group(5)
origin = mmap_info.filename
code_map.Add(Code(name, start_address, start_address + size,
origin, origin_offset))
finally:
pipe.close()
assert process.wait() == 0, "Failed to objdump %s" % mmap_info.filename
def Tick(self, pc):
for i, mmap_info in enumerate(self.infos):
if mmap_info.addr <= pc < (mmap_info.addr + mmap_info.len):
mmap_info.ticks += 1
self.infos[0], self.infos[i] = mmap_info, self.infos[0]
return True
return False
def _UniqueMmapName(self, mmap_info):
name = mmap_info.filename
index = 1
while name in self.names:
name = "%s-%d" % (mmap_info.filename, index)
index += 1
self.names.add(name)
return name
def _LoadKernelSymbols(self, code_map):
if not os.path.exists(KERNEL_ALLSYMS_FILE):
print >>sys.stderr, "Warning: %s not found" % KERNEL_ALLSYMS_FILE
return False
kallsyms = open(KERNEL_ALLSYMS_FILE, "r")
code = None
for line in kallsyms:
match = KERNEL_ALLSYMS_LINE_RE.match(line)
if match:
start_address = int(match.group(1), 16)
end_address = start_address
name = match.group(2)
if code:
code.end_address = start_address
code_map.Add(code, 16)
code = Code(name, start_address, end_address, "kernel", 0)
return True
def PrintReport(code_map, library_repo, arch, ticks, options):
print "Ticks per symbol:"
used_code = [code for code in code_map.UsedCode()]
used_code.sort(key=lambda x: x.self_ticks, reverse=True)
for i, code in enumerate(used_code):
code_ticks = code.self_ticks
print "%10d %5.1f%% %s [%s]" % (code_ticks, 100. * code_ticks / ticks,
code.FullName(), code.origin)
if options.disasm_all or i < options.disasm_top:
code.PrintAnnotated(arch, options)
print
print "Ticks per library:"
mmap_infos = [m for m in library_repo.infos if m.ticks > 0]
mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
for mmap_info in mmap_infos:
mmap_ticks = mmap_info.ticks
print "%10d %5.1f%% %s" % (mmap_ticks, 100. * mmap_ticks / ticks,
mmap_info.unique_name)
def PrintDot(code_map, options):
print "digraph G {"
for code in code_map.UsedCode():
if code.self_ticks < 10:
continue
print "n%d [shape=box,label=\"%s\"];" % (code.id, code.name)
if code.callee_ticks:
for callee, ticks in code.callee_ticks.iteritems():
print "n%d -> n%d [label=\"%d\"];" % (code.id, callee.id, ticks)
print "}"
if __name__ == "__main__":
parser = optparse.OptionParser(USAGE)
parser.add_option("--snapshot-log",
default="obj/release/snapshot.log",
help="V8 snapshot log file name [default: %default]")
parser.add_option("--log",
default="v8.log",
help="V8 log file name [default: %default]")
parser.add_option("--snapshot",
default=False,
action="store_true",
help="process V8 snapshot log [default: %default]")
parser.add_option("--trace",
default="perf.data",
help="perf trace file name [default: %default]")
parser.add_option("--kernel",
default=False,
action="store_true",
help="process kernel entries [default: %default]")
parser.add_option("--disasm-top",
default=0,
type="int",
help=("number of top symbols to disassemble and annotate "
"[default: %default]"))
parser.add_option("--disasm-all",
default=False,
action="store_true",
help=("disassemble and annotate all used symbols "
"[default: %default]"))
parser.add_option("--dot",
default=False,
action="store_true",
help="produce dot output (WIP) [default: %default]")
parser.add_option("--quiet", "-q",
default=False,
action="store_true",
help="no auxiliary messages [default: %default]")
parser.add_option("--gc-fake-mmap",
default="/tmp/__v8_gc__",
help="gc fake mmap file [default: %default]")
parser.add_option("--objdump",
default="/usr/bin/objdump",
help="objdump tool to use [default: %default]")
parser.add_option("--host-root",
default="",
help="Path to the host root [default: %default]")
options, args = parser.parse_args()
if not options.quiet:
if options.snapshot:
print "V8 logs: %s, %s, %s.ll" % (options.snapshot_log,
options.log,
options.log)
else:
print "V8 log: %s, %s.ll (no snapshot)" % (options.log, options.log)
print "Perf trace file: %s" % options.trace
V8_GC_FAKE_MMAP = options.gc_fake_mmap
HOST_ROOT = options.host_root
if os.path.exists(options.objdump):
disasm.OBJDUMP_BIN = options.objdump
OBJDUMP_BIN = options.objdump
else:
print "Cannot find %s, falling back to default objdump" % options.objdump
# Stats.
events = 0
ticks = 0
missed_ticks = 0
really_missed_ticks = 0
optimized_ticks = 0
generated_ticks = 0
v8_internal_ticks = 0
mmap_time = 0
sample_time = 0
# Process the snapshot log to fill the snapshot name map.
snapshot_name_map = {}
if options.snapshot:
snapshot_log_reader = SnapshotLogReader(log_name=options.snapshot_log)
snapshot_name_map = snapshot_log_reader.ReadNameMap()
# Initialize the log reader.
code_map = CodeMap()
log_reader = LogReader(log_name=options.log + ".ll",
code_map=code_map,
snapshot_pos_to_name=snapshot_name_map)
if not options.quiet:
print "Generated code architecture: %s" % log_reader.arch
print
sys.stdout.flush()
# Process the code and trace logs.
library_repo = LibraryRepo()
log_reader.ReadUpToGC()
trace_reader = TraceReader(options.trace)
while True:
header, offset = trace_reader.ReadEventHeader()
if not header:
break
events += 1
if header.type == PERF_RECORD_MMAP:
start = time.time()
mmap_info = trace_reader.ReadMmap(header, offset)
if mmap_info.filename == HOST_ROOT + V8_GC_FAKE_MMAP:
log_reader.ReadUpToGC()
else:
library_repo.Load(mmap_info, code_map, options)
mmap_time += time.time() - start
elif header.type == PERF_RECORD_SAMPLE:
ticks += 1
start = time.time()
sample = trace_reader.ReadSample(header, offset)
code = code_map.Find(sample.ip)
if code:
code.Tick(sample.ip)
if code.codetype == Code.OPTIMIZED:
optimized_ticks += 1
elif code.codetype == Code.FULL_CODEGEN:
generated_ticks += 1
elif code.codetype == Code.V8INTERNAL:
v8_internal_ticks += 1
else:
missed_ticks += 1
if not library_repo.Tick(sample.ip) and not code:
really_missed_ticks += 1
if trace_reader.callchain_supported:
for ip in sample.ips:
caller_code = code_map.Find(ip)
if caller_code:
if code:
caller_code.CalleeTick(code)
code = caller_code
sample_time += time.time() - start
if options.dot:
PrintDot(code_map, options)
else:
PrintReport(code_map, library_repo, log_reader.arch, ticks, options)
if not options.quiet:
def PrintTicks(number, total, description):
print("%10d %5.1f%% ticks in %s" %
(number, 100.0*number/total, description))
print
print "Stats:"
print "%10d total trace events" % events
print "%10d total ticks" % ticks
print "%10d ticks not in symbols" % missed_ticks
unaccounted = "unaccounted ticks"
if really_missed_ticks > 0:
unaccounted += " (probably in the kernel, try --kernel)"
PrintTicks(really_missed_ticks, ticks, unaccounted)
PrintTicks(optimized_ticks, ticks, "ticks in optimized code")
PrintTicks(generated_ticks, ticks, "ticks in other lazily compiled code")
PrintTicks(v8_internal_ticks, ticks, "ticks in v8::internal::*")
print "%10d total symbols" % len([c for c in code_map.AllCode()])
print "%10d used symbols" % len([c for c in code_map.UsedCode()])
print "%9.2fs library processing time" % mmap_time
print "%9.2fs tick processing time" % sample_time
log_reader.Dispose()
trace_reader.Dispose()
|
79
|
0
|
0.0
|
231
|
0
|
79
|
1.0
|
2
|
160
|
[
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 114,
"message": "Rename method \"AddName\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 114,
"endOffset": 13,
"startLine": 114,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 119,
"message": "Use the opposite operator (\"not in\") instead.",
"textRange": {
"endLine": 119,
"endOffset": 35,
"startLine": 119,
"startOffset": 7
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 122,
"message": "Rename method \"FullName\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 122,
"endOffset": 14,
"startLine": 122,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 128,
"message": "Rename method \"IsUsed\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 128,
"endOffset": 12,
"startLine": 128,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 131,
"message": "Rename method \"Tick\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 131,
"endOffset": 10,
"startLine": 131,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 138,
"message": "Rename method \"CalleeTick\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 138,
"endOffset": 16,
"startLine": 138,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 143,
"message": "Rename method \"PrintAnnotated\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 143,
"endOffset": 20,
"startLine": 143,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 178,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 178,
"endOffset": 13,
"startLine": 178,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 180,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 180,
"endOffset": 13,
"startLine": 180,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 181,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 181,
"endOffset": 9,
"startLine": 181,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 193,
"message": "Rename method \"_GetDisasmLines\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 193,
"endOffset": 21,
"startLine": 193,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 215,
"message": "Rename method \"PageAddress\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 215,
"endOffset": 17,
"startLine": 215,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 219,
"message": "Rename method \"PageId\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 219,
"endOffset": 12,
"startLine": 219,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 223,
"message": "Rename method \"PageAddressFromId\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 223,
"endOffset": 23,
"startLine": 223,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 230,
"message": "Rename method \"Add\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 230,
"endOffset": 9,
"startLine": 230,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 233,
"message": "Rename method \"Remove\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 233,
"endOffset": 12,
"startLine": 233,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 236,
"message": "Rename method \"Find\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 236,
"endOffset": 10,
"startLine": 236,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 256,
"message": "Rename method \"Add\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 256,
"endOffset": 9,
"startLine": 256,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 262,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 262,
"endOffset": 13,
"startLine": 262,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 277,
"message": "Rename method \"Remove\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 277,
"endOffset": 12,
"startLine": 277,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 291,
"message": "Rename method \"AllCode\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 291,
"endOffset": 13,
"startLine": 291,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 297,
"message": "Rename method \"UsedCode\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 297,
"endOffset": 14,
"startLine": 297,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 302,
"message": "Rename method \"Print\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 302,
"endOffset": 11,
"startLine": 302,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 304,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 304,
"endOffset": 11,
"startLine": 304,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 306,
"message": "Rename method \"Find\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 306,
"endOffset": 10,
"startLine": 306,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 332,
"message": "Rename method \"ReadNameMap\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 332,
"endOffset": 17,
"startLine": 332,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 394,
"message": "Rename method \"ReadUpToGC\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 394,
"endOffset": 16,
"startLine": 394,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 394,
"message": "Refactor this function to reduce its Cognitive Complexity from 36 to the 15 allowed.",
"textRange": {
"endLine": 394,
"endOffset": 16,
"startLine": 394,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 425,
"message": "Complete the task associated to this \"TODO\" comment.",
"textRange": {
"endLine": 425,
"endOffset": 69,
"startLine": 425,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 443,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 443,
"endOffset": 15,
"startLine": 443,
"startOffset": 10
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 460,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 460,
"endOffset": 15,
"startLine": 460,
"startOffset": 10
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 480,
"message": "Rename method \"Dispose\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 480,
"endOffset": 13,
"startLine": 480,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 485,
"message": "Rename method \"_DefineStruct\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 485,
"endOffset": 19,
"startLine": 485,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 491,
"message": "Rename method \"_HandleCodeConflict\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 491,
"endOffset": 25,
"startLine": 491,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 521,
"message": "Rename method \"Read\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 521,
"endOffset": 10,
"startLine": 521,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 525,
"message": "Rename method \"CtypesFields\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 525,
"endOffset": 18,
"startLine": 525,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 632,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 632,
"endOffset": 11,
"startLine": 632,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 650,
"message": "Rename method \"ReadEventHeader\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 650,
"endOffset": 21,
"startLine": 650,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 658,
"message": "Rename method \"ReadMmap\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 658,
"endOffset": 14,
"startLine": 658,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 667,
"message": "Remove the unused function parameter \"header\".",
"textRange": {
"endLine": 667,
"endOffset": 29,
"startLine": 667,
"startOffset": 23
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 667,
"message": "Rename method \"ReadSample\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 667,
"endOffset": 16,
"startLine": 667,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 680,
"message": "Rename method \"Dispose\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 680,
"endOffset": 13,
"startLine": 680,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 684,
"message": "Rename method \"_SampleEventBodyDesc\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 684,
"endOffset": 26,
"startLine": 684,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 705,
"message": "Replace this alternation with a character class.",
"textRange": {
"endLine": 705,
"endOffset": 24,
"startLine": 705,
"startOffset": 21
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 714,
"message": "Refactor this function to reduce its Cognitive Complexity from 30 to the 15 allowed.",
"textRange": {
"endLine": 714,
"endOffset": 10,
"startLine": 714,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 714,
"message": "Rename method \"Load\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 714,
"endOffset": 10,
"startLine": 714,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 768,
"message": "Remove the unused local variable \"flags\".",
"textRange": {
"endLine": 768,
"endOffset": 15,
"startLine": 768,
"startOffset": 10
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 782,
"message": "Rename method \"Tick\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 782,
"endOffset": 10,
"startLine": 782,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 790,
"message": "Rename method \"_UniqueMmapName\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 790,
"endOffset": 21,
"startLine": 790,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 799,
"message": "Rename method \"_LoadKernelSymbols\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 799,
"endOffset": 24,
"startLine": 799,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 801,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 801,
"endOffset": 11,
"startLine": 801,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 818,
"message": "Rename function \"PrintReport\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 818,
"endOffset": 15,
"startLine": 818,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 819,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 819,
"endOffset": 7,
"startLine": 819,
"startOffset": 2
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 824,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 824,
"endOffset": 9,
"startLine": 824,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 828,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 828,
"endOffset": 7,
"startLine": 828,
"startOffset": 2
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 829,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 829,
"endOffset": 7,
"startLine": 829,
"startOffset": 2
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 834,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 834,
"endOffset": 9,
"startLine": 834,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 838,
"message": "Remove the unused function parameter \"options\".",
"textRange": {
"endLine": 838,
"endOffset": 30,
"startLine": 838,
"startOffset": 23
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 838,
"message": "Rename function \"PrintDot\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 838,
"endOffset": 12,
"startLine": 838,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 839,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 839,
"endOffset": 7,
"startLine": 839,
"startOffset": 2
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 843,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 843,
"endOffset": 9,
"startLine": 843,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 846,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 846,
"endOffset": 13,
"startLine": 846,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 847,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 847,
"endOffset": 7,
"startLine": 847,
"startOffset": 2
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 900,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 900,
"endOffset": 11,
"startLine": 900,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 904,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 904,
"endOffset": 11,
"startLine": 904,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 905,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 905,
"endOffset": 9,
"startLine": 905,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 913,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 913,
"endOffset": 9,
"startLine": 913,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 938,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 938,
"endOffset": 9,
"startLine": 938,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 939,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 939,
"endOffset": 9,
"startLine": 939,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 991,
"message": "Rename function \"PrintTicks\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 991,
"endOffset": 20,
"startLine": 991,
"startOffset": 10
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 994,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 994,
"endOffset": 11,
"startLine": 994,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 995,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 995,
"endOffset": 11,
"startLine": 995,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 996,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 996,
"endOffset": 11,
"startLine": 996,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 997,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 997,
"endOffset": 11,
"startLine": 997,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 998,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 998,
"endOffset": 11,
"startLine": 998,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 1006,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 1006,
"endOffset": 11,
"startLine": 1006,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 1007,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 1007,
"endOffset": 11,
"startLine": 1007,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 1008,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 1008,
"endOffset": 11,
"startLine": 1008,
"startOffset": 6
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 1009,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 1009,
"endOffset": 11,
"startLine": 1009,
"startOffset": 6
},
"type": "CODE_SMELL"
}
] | 473 |
tests/testing_config/custom_components/test/binary_sensor.py
|
domwillcode/home-assistant
| 30,023 |
3218482
|
"""
Provide a mock binary sensor platform.
Call init before using it in your tests to ensure clean test data.
"""
from homeassistant.components.binary_sensor import DEVICE_CLASSES, BinarySensorEntity
from tests.common import MockEntity
ENTITIES = {}
def init(empty=False):
"""Initialize the platform with entities."""
global ENTITIES
ENTITIES = (
{}
if empty
else {
device_class: MockBinarySensor(
name=f"{device_class} sensor",
is_on=True,
unique_id=f"unique_{device_class}",
device_class=device_class,
)
for device_class in DEVICE_CLASSES
}
)
async def async_setup_platform(
hass, config, async_add_entities_callback, discovery_info=None
):
"""Return mock entities."""
async_add_entities_callback(list(ENTITIES.values()))
class MockBinarySensor(MockEntity, BinarySensorEntity):
"""Mock Binary Sensor class."""
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._handle("is_on")
@property
def device_class(self):
"""Return the class of this sensor."""
return self._handle("device_class")
| null | null | null | null | null | null | null | null | null |
[] | 2,126 |
tests/pyb/i2c_accel.py
|
sebastien-riou/micropython
| 4,538 |
9610447
|
# use accelerometer to test i2c bus
import pyb
from pyb import I2C
if not hasattr(pyb, "Accel"):
print("SKIP")
raise SystemExit
accel_addr = 76
pyb.Accel() # this will init the MMA for us
i2c = I2C(1, I2C.MASTER, baudrate=400000)
print(i2c.scan())
print(i2c.is_ready(accel_addr))
print(i2c.mem_read(1, accel_addr, 7, timeout=500))
i2c.mem_write(0, accel_addr, 0, timeout=500)
i2c.send(7, addr=accel_addr)
i2c.recv(1, addr=accel_addr)
|
0
|
0
|
0.0
|
1
|
0
|
0
|
1.0
|
0
|
1
|
[] | 5,548 |
tests/test_lazy_hyperlinks.py
|
scratchmex/django-rest-framework
| 17,395 |
8112240
|
from django.db import models
from django.test import TestCase, override_settings
from django.urls import path
from rest_framework import serializers
from rest_framework.renderers import JSONRenderer
from rest_framework.templatetags.rest_framework import format_value
str_called = False
class Example(models.Model):
text = models.CharField(max_length=100)
def __str__(self):
global str_called
str_called = True
return 'An example'
class ExampleSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Example
fields = ('url', 'id', 'text')
def dummy_view(request):
pass
urlpatterns = [
path('example/<int:pk>/', dummy_view, name='example-detail'),
]
@override_settings(ROOT_URLCONF='tests.test_lazy_hyperlinks')
class TestLazyHyperlinkNames(TestCase):
def setUp(self):
self.example = Example.objects.create(text='foo')
def test_lazy_hyperlink_names(self):
global str_called
context = {'request': None}
serializer = ExampleSerializer(self.example, context=context)
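        # Rendering should not force evaluation of Example.__str__; hyperlink display names are lazy.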
JSONRenderer().render(serializer.data)
assert not str_called
hyperlink_string = format_value(serializer.data['url'])
assert hyperlink_string == '<a href=/example/1/>An example</a>'
assert str_called
| null | null | null | null | null | null | null | null | null |
[] | 5,102 |
tests/components/recorder/test_statistics_v23_migration.py
|
mib1185/core
| 30,023 |
8449169
|
"""The tests for sensor recorder platform migrating statistics from v23.
The v23 schema used for these tests has been slightly modified to add the
EventData table to allow the recorder to startup successfully.
"""
# pylint: disable=protected-access,invalid-name
import importlib
import json
import sys
from unittest.mock import patch
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from homeassistant.components import recorder
from homeassistant.components.recorder import SQLITE_URL_PREFIX, statistics
from homeassistant.components.recorder.util import session_scope
from homeassistant.setup import setup_component
import homeassistant.util.dt as dt_util
from tests.common import get_test_home_assistant
from tests.components.recorder.common import wait_recording_done
ORIG_TZ = dt_util.DEFAULT_TIME_ZONE
CREATE_ENGINE_TARGET = "homeassistant.components.recorder.core.create_engine"
SCHEMA_MODULE = "tests.components.recorder.db_schema_23_with_newer_columns"
def _create_engine_test(*args, **kwargs):
"""Test version of create_engine that initializes with old schema.
This simulates an existing db with the old schema.
"""
importlib.import_module(SCHEMA_MODULE)
old_db_schema = sys.modules[SCHEMA_MODULE]
engine = create_engine(*args, **kwargs)
old_db_schema.Base.metadata.create_all(engine)
with Session(engine) as session:
session.add(
recorder.db_schema.StatisticsRuns(start=statistics.get_start_time())
)
session.add(
recorder.db_schema.SchemaChanges(
schema_version=old_db_schema.SCHEMA_VERSION
)
)
session.commit()
return engine
def test_delete_duplicates(caplog, tmpdir):
"""Test removal of duplicated statistics."""
test_db_file = tmpdir.mkdir("sqlite").join("test_run_info.db")
dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"
importlib.import_module(SCHEMA_MODULE)
old_db_schema = sys.modules[SCHEMA_MODULE]
period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00"))
period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00"))
period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00"))
external_energy_statistics_1 = (
{
"start": period1,
"last_reset": None,
"state": 0,
"sum": 2,
},
{
"start": period2,
"last_reset": None,
"state": 1,
"sum": 3,
},
{
"start": period3,
"last_reset": None,
"state": 2,
"sum": 4,
},
{
"start": period4,
"last_reset": None,
"state": 3,
"sum": 5,
},
{
"start": period4,
"last_reset": None,
"state": 3,
"sum": 5,
},
)
external_energy_metadata_1 = {
"has_mean": False,
"has_sum": True,
"name": "Total imported energy",
"source": "test",
"statistic_id": "test:total_energy_import_tariff_1",
"unit_of_measurement": "kWh",
}
external_energy_statistics_2 = (
{
"start": period1,
"last_reset": None,
"state": 0,
"sum": 20,
},
{
"start": period2,
"last_reset": None,
"state": 1,
"sum": 30,
},
{
"start": period3,
"last_reset": None,
"state": 2,
"sum": 40,
},
{
"start": period4,
"last_reset": None,
"state": 3,
"sum": 50,
},
{
"start": period4,
"last_reset": None,
"state": 3,
"sum": 50,
},
)
external_energy_metadata_2 = {
"has_mean": False,
"has_sum": True,
"name": "Total imported energy",
"source": "test",
"statistic_id": "test:total_energy_import_tariff_2",
"unit_of_measurement": "kWh",
}
external_co2_statistics = (
{
"start": period1,
"last_reset": None,
"mean": 10,
},
{
"start": period2,
"last_reset": None,
"mean": 30,
},
{
"start": period3,
"last_reset": None,
"mean": 60,
},
{
"start": period4,
"last_reset": None,
"mean": 90,
},
)
external_co2_metadata = {
"has_mean": True,
"has_sum": False,
"name": "Fossil percentage",
"source": "test",
"statistic_id": "test:fossil_percentage",
"unit_of_measurement": "%",
}
# Create some duplicated statistics with schema version 23
with patch.object(recorder, "db_schema", old_db_schema), patch.object(
recorder.migration, "SCHEMA_VERSION", old_db_schema.SCHEMA_VERSION
), patch(CREATE_ENGINE_TARGET, new=_create_engine_test):
hass = get_test_home_assistant()
setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
wait_recording_done(hass)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
session.add(
recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_1)
)
session.add(
recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_2)
)
session.add(
recorder.db_schema.StatisticsMeta.from_meta(external_co2_metadata)
)
with session_scope(hass=hass) as session:
for stat in external_energy_statistics_1:
session.add(recorder.db_schema.Statistics.from_stats(1, stat))
for stat in external_energy_statistics_2:
session.add(recorder.db_schema.Statistics.from_stats(2, stat))
for stat in external_co2_statistics:
session.add(recorder.db_schema.Statistics.from_stats(3, stat))
hass.stop()
dt_util.DEFAULT_TIME_ZONE = ORIG_TZ
# Test that the duplicates are removed during migration from schema 23
hass = get_test_home_assistant()
setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
hass.start()
wait_recording_done(hass)
wait_recording_done(hass)
hass.stop()
dt_util.DEFAULT_TIME_ZONE = ORIG_TZ
assert "Deleted 2 duplicated statistics rows" in caplog.text
assert "Found non identical" not in caplog.text
assert "Found duplicated" not in caplog.text
def test_delete_duplicates_many(caplog, tmpdir):
"""Test removal of duplicated statistics."""
test_db_file = tmpdir.mkdir("sqlite").join("test_run_info.db")
dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"
importlib.import_module(SCHEMA_MODULE)
old_db_schema = sys.modules[SCHEMA_MODULE]
period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00"))
period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00"))
period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00"))
external_energy_statistics_1 = (
{
"start": period1,
"last_reset": None,
"state": 0,
"sum": 2,
},
{
"start": period2,
"last_reset": None,
"state": 1,
"sum": 3,
},
{
"start": period3,
"last_reset": None,
"state": 2,
"sum": 4,
},
{
"start": period4,
"last_reset": None,
"state": 3,
"sum": 5,
},
{
"start": period4,
"last_reset": None,
"state": 3,
"sum": 5,
},
)
external_energy_metadata_1 = {
"has_mean": False,
"has_sum": True,
"name": "Total imported energy",
"source": "test",
"statistic_id": "test:total_energy_import_tariff_1",
"unit_of_measurement": "kWh",
}
external_energy_statistics_2 = (
{
"start": period1,
"last_reset": None,
"state": 0,
"sum": 20,
},
{
"start": period2,
"last_reset": None,
"state": 1,
"sum": 30,
},
{
"start": period3,
"last_reset": None,
"state": 2,
"sum": 40,
},
{
"start": period4,
"last_reset": None,
"state": 3,
"sum": 50,
},
{
"start": period4,
"last_reset": None,
"state": 3,
"sum": 50,
},
)
external_energy_metadata_2 = {
"has_mean": False,
"has_sum": True,
"name": "Total imported energy",
"source": "test",
"statistic_id": "test:total_energy_import_tariff_2",
"unit_of_measurement": "kWh",
}
external_co2_statistics = (
{
"start": period1,
"last_reset": None,
"mean": 10,
},
{
"start": period2,
"last_reset": None,
"mean": 30,
},
{
"start": period3,
"last_reset": None,
"mean": 60,
},
{
"start": period4,
"last_reset": None,
"mean": 90,
},
)
external_co2_metadata = {
"has_mean": True,
"has_sum": False,
"name": "Fossil percentage",
"source": "test",
"statistic_id": "test:fossil_percentage",
"unit_of_measurement": "%",
}
# Create some duplicated statistics with schema version 23
with patch.object(recorder, "db_schema", old_db_schema), patch.object(
recorder.migration, "SCHEMA_VERSION", old_db_schema.SCHEMA_VERSION
), patch(CREATE_ENGINE_TARGET, new=_create_engine_test):
hass = get_test_home_assistant()
setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
wait_recording_done(hass)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
session.add(
recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_1)
)
session.add(
recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_2)
)
session.add(
recorder.db_schema.StatisticsMeta.from_meta(external_co2_metadata)
)
with session_scope(hass=hass) as session:
for stat in external_energy_statistics_1:
session.add(recorder.db_schema.Statistics.from_stats(1, stat))
for _ in range(3000):
session.add(
recorder.db_schema.Statistics.from_stats(
1, external_energy_statistics_1[-1]
)
)
for stat in external_energy_statistics_2:
session.add(recorder.db_schema.Statistics.from_stats(2, stat))
for stat in external_co2_statistics:
session.add(recorder.db_schema.Statistics.from_stats(3, stat))
hass.stop()
dt_util.DEFAULT_TIME_ZONE = ORIG_TZ
# Test that the duplicates are removed during migration from schema 23
hass = get_test_home_assistant()
setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
hass.start()
wait_recording_done(hass)
wait_recording_done(hass)
hass.stop()
dt_util.DEFAULT_TIME_ZONE = ORIG_TZ
assert "Deleted 3002 duplicated statistics rows" in caplog.text
assert "Found non identical" not in caplog.text
assert "Found duplicated" not in caplog.text
@pytest.mark.freeze_time("2021-08-01 00:00:00+00:00")
def test_delete_duplicates_non_identical(caplog, tmpdir):
"""Test removal of duplicated statistics."""
test_db_file = tmpdir.mkdir("sqlite").join("test_run_info.db")
dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"
importlib.import_module(SCHEMA_MODULE)
old_db_schema = sys.modules[SCHEMA_MODULE]
period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00"))
period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00"))
period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00"))
external_energy_statistics_1 = (
{
"start": period1,
"last_reset": None,
"state": 0,
"sum": 2,
},
{
"start": period2,
"last_reset": None,
"state": 1,
"sum": 3,
},
{
"start": period3,
"last_reset": None,
"state": 2,
"sum": 4,
},
{
"start": period4,
"last_reset": None,
"state": 3,
"sum": 5,
},
{
"start": period4,
"last_reset": None,
"state": 3,
"sum": 6,
},
)
external_energy_metadata_1 = {
"has_mean": False,
"has_sum": True,
"name": "Total imported energy",
"source": "test",
"statistic_id": "test:total_energy_import_tariff_1",
"unit_of_measurement": "kWh",
}
external_energy_statistics_2 = (
{
"start": period1,
"last_reset": None,
"state": 0,
"sum": 20,
},
{
"start": period2,
"last_reset": None,
"state": 1,
"sum": 30,
},
{
"start": period3,
"last_reset": None,
"state": 2,
"sum": 40,
},
{
"start": period4,
"last_reset": None,
"state": 3,
"sum": 50,
},
{
"start": period4,
"last_reset": None,
"state": 3,
"sum": 50,
},
)
external_energy_metadata_2 = {
"has_mean": False,
"has_sum": True,
"name": "Total imported energy",
"source": "test",
"statistic_id": "test:total_energy_import_tariff_2",
"unit_of_measurement": "kWh",
}
# Create some duplicated statistics with schema version 23
with patch.object(recorder, "db_schema", old_db_schema), patch.object(
recorder.migration, "SCHEMA_VERSION", old_db_schema.SCHEMA_VERSION
), patch(CREATE_ENGINE_TARGET, new=_create_engine_test):
hass = get_test_home_assistant()
setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
wait_recording_done(hass)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
session.add(
recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_1)
)
session.add(
recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_2)
)
with session_scope(hass=hass) as session:
for stat in external_energy_statistics_1:
session.add(recorder.db_schema.Statistics.from_stats(1, stat))
for stat in external_energy_statistics_2:
session.add(recorder.db_schema.Statistics.from_stats(2, stat))
hass.stop()
dt_util.DEFAULT_TIME_ZONE = ORIG_TZ
# Test that the duplicates are removed during migration from schema 23
hass = get_test_home_assistant()
hass.config.config_dir = tmpdir
setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
hass.start()
wait_recording_done(hass)
wait_recording_done(hass)
hass.stop()
dt_util.DEFAULT_TIME_ZONE = ORIG_TZ
assert "Deleted 2 duplicated statistics rows" in caplog.text
assert "Deleted 1 non identical" in caplog.text
assert "Found duplicated" not in caplog.text
isotime = dt_util.utcnow().isoformat()
backup_file_name = f".storage/deleted_statistics.{isotime}.json"
with open(hass.config.path(backup_file_name)) as backup_file:
backup = json.load(backup_file)
assert backup == [
{
"duplicate": {
"created": "2021-08-01T00:00:00",
"id": 4,
"last_reset": None,
"max": None,
"mean": None,
"metadata_id": 1,
"min": None,
"start": "2021-10-31T23:00:00",
"state": 3.0,
"sum": 5.0,
},
"original": {
"created": "2021-08-01T00:00:00",
"id": 5,
"last_reset": None,
"max": None,
"mean": None,
"metadata_id": 1,
"min": None,
"start": "2021-10-31T23:00:00",
"state": 3.0,
"sum": 6.0,
},
}
]
def test_delete_duplicates_short_term(caplog, tmpdir):
"""Test removal of duplicated statistics."""
test_db_file = tmpdir.mkdir("sqlite").join("test_run_info.db")
dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"
importlib.import_module(SCHEMA_MODULE)
old_db_schema = sys.modules[SCHEMA_MODULE]
period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00"))
external_energy_metadata_1 = {
"has_mean": False,
"has_sum": True,
"name": "Total imported energy",
"source": "test",
"statistic_id": "test:total_energy_import_tariff_1",
"unit_of_measurement": "kWh",
}
statistic_row = {
"start": period4,
"last_reset": None,
"state": 3,
"sum": 5,
}
# Create some duplicated statistics with schema version 23
with patch.object(recorder, "db_schema", old_db_schema), patch.object(
recorder.migration, "SCHEMA_VERSION", old_db_schema.SCHEMA_VERSION
), patch(CREATE_ENGINE_TARGET, new=_create_engine_test):
hass = get_test_home_assistant()
setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
wait_recording_done(hass)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
session.add(
recorder.db_schema.StatisticsMeta.from_meta(external_energy_metadata_1)
)
with session_scope(hass=hass) as session:
session.add(
recorder.db_schema.StatisticsShortTerm.from_stats(1, statistic_row)
)
session.add(
recorder.db_schema.StatisticsShortTerm.from_stats(1, statistic_row)
)
hass.stop()
dt_util.DEFAULT_TIME_ZONE = ORIG_TZ
# Test that the duplicates are removed during migration from schema 23
hass = get_test_home_assistant()
hass.config.config_dir = tmpdir
setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
hass.start()
wait_recording_done(hass)
wait_recording_done(hass)
hass.stop()
dt_util.DEFAULT_TIME_ZONE = ORIG_TZ
assert "duplicated statistics rows" not in caplog.text
assert "Found non identical" not in caplog.text
assert "Deleted duplicated short term statistic" in caplog.text
|
6
|
0
|
68.5
|
9
|
0
|
6
|
1.0
|
0
|
14
|
[
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 55,
"message": "Define a constant instead of duplicating this literal \"test_run_info.db\" 4 times.",
"textRange": {
"endLine": 55,
"endOffset": 65,
"startLine": 55,
"startOffset": 47
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 101,
"message": "Define a constant instead of duplicating this literal \"Total imported energy\" 7 times.",
"textRange": {
"endLine": 101,
"endOffset": 39,
"startLine": 101,
"startOffset": 16
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 103,
"message": "Define a constant instead of duplicating this literal \"test:total_energy_import_tariff_1\" 4 times.",
"textRange": {
"endLine": 103,
"endOffset": 59,
"startLine": 103,
"startOffset": 24
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 143,
"message": "Define a constant instead of duplicating this literal \"test:total_energy_import_tariff_2\" 3 times.",
"textRange": {
"endLine": 143,
"endOffset": 59,
"startLine": 143,
"startOffset": 24
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 217,
"message": "Define a constant instead of duplicating this literal \"Found non identical\" 3 times.",
"textRange": {
"endLine": 217,
"endOffset": 32,
"startLine": 217,
"startOffset": 11
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 218,
"message": "Define a constant instead of duplicating this literal \"Found duplicated\" 3 times.",
"textRange": {
"endLine": 218,
"endOffset": 29,
"startLine": 218,
"startOffset": 11
},
"type": "CODE_SMELL"
}
] | 10,955 |
recipes/Python/577172_writepath_loadpath_methods_file/recipe-577172.py
|
tdiprima/code
| 2,023 |
6592001
|
def _write_path(path, text, encoding, create_backup=False, log=None):
"""Write content to a path.
@param path {str}
@param text {unicode}
@param encoding {str} The file encoding to use.
@param create_backup {bool} Default False. Whether to create a backup
file. The path of the backup will be `<path>.bak`. If that path
exists it will be overwritten.
@param log {logging.Logger} A logger to use for logging. No logging is
        done if this is not given.
"""
import os
from os.path import exists, split, join
import codecs
# Write out new content to '.foo.tmp'.
dir, base = split(path)
tmp_path = join(dir, '.' + base + '.tmp')
f = codecs.open(tmp_path, 'wb', encoding=encoding)
try:
f.write(text)
finally:
f.close()
# Backup to 'foo.bak'.
if create_backup:
bak_path = path + ".bak"
if exists(bak_path):
os.rename(path, bak_path)
elif exists(path):
os.remove(path)
# Move '.foo.tmp' to 'foo'.
os.rename(tmp_path, path)
if log:
log.info("wrote `%s'", path)
def _load_path(path, encoding="utf-8", log=None):
"""Return the content of the given path.
@param path {str}
@param encoding {str} Default 'utf-8'.
@param log {logging.Logger} A logger to use for logging. No logging is
        done if this is not given.
@returns {2-tuple} (<text>, <encoding>) where `text` is the
unicode text content of the file and `encoding` is the encoding of
the file. `text` is None if there was an error. Errors are logged
via `log.error`.
"""
import codecs
try:
f = codecs.open(path, 'rb', encoding)
except EnvironmentError, ex:
if log:
log.error("could not open `%s': %s", path, ex)
return None, None
else:
try:
try:
text = f.read()
except UnicodeDecodeError, ex:
if log:
log.error("could not read `%s': %s", path, ex)
return None, None
finally:
f.close()
return text, encoding
| null | null | null | null | null | null | null | null | null |
[] | 4,645 |
recipes/Python/576543_Prime_Number_Generator_Checker/recipe-576543.py
|
tdiprima/code
| 2,023 |
11576
|
#
# prime number generator
# This program gets two numbers as input
# and prints
# Prime numbers in the range
# Actual number of primes in the range
# and Estimation based on formula
# n
# pi(n)= -------
# log(n)
# pi(n)=number of primes less than n
#
from math import *
def isPrime(n):
if n%2==0 and n!=2:return False #if number is EVEN AND it is NOT 2
k = n**0.5 ; m = ceil(k) #if number is PERFECT SQUARE
if k==m:return False
for i in xrange(3,int(m),2): #divisibility test ODDS ONLY
if n%i==0:return False
return True #otherwise it is PRIME
if __name__=='__main__':
s = input('Enter Start: ')
e = input('Enter End: ')
s|=1 #if s%2==0:s+=1 # ODDS only
list = [x for x in range(s,e,2) if isPrime(x)]
print list,'\n',len(list),'\n',int(ceil(e/log(e)-s/log(s)))
#prints list of primes , length of list , estimate using the formula
|
6
|
1
|
0.0
|
7
|
0
|
5
|
3.0
|
0
|
8
|
[
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 14,
"message": "Import only needed names or import the module and then use its members.",
"textRange": {
"endLine": 14,
"endOffset": 18,
"startLine": 14,
"startOffset": 0
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 15,
"message": "Rename function \"isPrime\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 15,
"endOffset": 11,
"startLine": 15,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 29,
"message": "Remove this commented out code.",
"textRange": {
"endLine": 29,
"endOffset": 68,
"startLine": 29,
"startOffset": 39
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 31,
"message": "Change this argument; Function \"log\" expects a different type",
"textRange": {
"endLine": 31,
"endOffset": 60,
"startLine": 31,
"startOffset": 59
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "RELIABILITY"
}
],
"line": 31,
"message": "Fix this invalid \"/\" operation between incompatible types (str and float).",
"textRange": {
"endLine": 31,
"endOffset": 55,
"startLine": 31,
"startOffset": 54
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 31,
"message": "Replace print statement by built-in function.",
"textRange": {
"endLine": 31,
"endOffset": 9,
"startLine": 31,
"startOffset": 4
},
"type": "CODE_SMELL"
}
] | 100 |
test/nn/functional/test_bro.py
|
NucciTheBoss/pytorch_geometric
| 2,350 |
1816381
|
import torch
from torch_geometric.nn.functional import bro
def test_bro():
batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 2, 2])
g1 = torch.tensor([
[0.2, 0.2, 0.2, 0.2],
[0.0, 0.2, 0.2, 0.2],
[0.2, 0.0, 0.2, 0.2],
[0.2, 0.2, 0.0, 0.2],
])
g2 = torch.tensor([
[0.2, 0.2, 0.2, 0.2],
[0.0, 0.2, 0.2, 0.2],
[0.2, 0.0, 0.2, 0.2],
])
g3 = torch.tensor([
[0.2, 0.2, 0.2, 0.2],
[0.2, 0.0, 0.2, 0.2],
])
s = 0.
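    # Reference value: mean over the three graphs of ||G @ G.T - I||_2, which bro() should reproduce.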
for g in [g1, g2, g3]:
s += torch.norm(g @ g.t() - torch.eye(g.shape[0]), p=2)
assert torch.isclose(s / 3., bro(torch.cat([g1, g2, g3], dim=0), batch))
| null | null | null | null | null | null | null | null | null |
[] | 8,219 |
project_euler/problem_074/sol2.py
|
NavpreetDevpuri/Python
| 145,614 |
1882791
|
"""
Project Euler Problem 074: https://projecteuler.net/problem=74
Starting from any positive integer number
it is possible to attain another one by summing the factorials of its digits.
Repeating this step, we can build chains of numbers.
It is not difficult to prove that EVERY starting number
will eventually get stuck in a loop.
The request is to find how many numbers less than one million
produce a chain with exactly 60 non repeating items.
Solution approach:
This solution simply consists in a loop that generates
the chains of non repeating items.
The generation of the chain stops before a repeating item
or if the size of the chain is greater than the desired one.
After generating each chain, the length is checked and the
counter increases.
"""
factorial_cache: dict[int, int] = {}
factorial_sum_cache: dict[int, int] = {}
def factorial(a: int) -> int:
"""Returns the factorial of the input a
>>> factorial(5)
120
>>> factorial(6)
720
>>> factorial(0)
1
"""
# The factorial function is not defined for negative numbers
if a < 0:
raise ValueError("Invalid negative input!", a)
if a in factorial_cache:
return factorial_cache[a]
# The case of 0! is handled separately
if a == 0:
factorial_cache[a] = 1
else:
# use a temporary support variable to store the computation
temporary_number = a
temporary_computation = 1
while temporary_number > 0:
temporary_computation *= temporary_number
temporary_number -= 1
factorial_cache[a] = temporary_computation
return factorial_cache[a]
def factorial_sum(a: int) -> int:
"""Function to perform the sum of the factorial
of all the digits in a
>>> factorial_sum(69)
363600
"""
if a in factorial_sum_cache:
return factorial_sum_cache[a]
# Prepare a variable to hold the computation
fact_sum = 0
""" Convert a in string to iterate on its digits
convert the digit back into an int
and add its factorial to fact_sum.
"""
for i in str(a):
fact_sum += factorial(int(i))
factorial_sum_cache[a] = fact_sum
return fact_sum
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
"""Returns the number of numbers that produce
chains with exactly 60 non repeating elements.
>>> solution(10, 1000)
26
"""
# the counter for the chains with the exact desired length
chain_counter = 0
for i in range(1, number_limit + 1):
# The temporary list will contain the elements of the chain
chain_set = {i}
len_chain_set = 1
last_chain_element = i
# The new element of the chain
new_chain_element = factorial_sum(last_chain_element)
# Stop computing the chain when you find a repeating item
        # or the length is greater than the desired one.
while new_chain_element not in chain_set and len_chain_set <= chain_length:
chain_set.add(new_chain_element)
len_chain_set += 1
last_chain_element = new_chain_element
new_chain_element = factorial_sum(last_chain_element)
# If the while exited because the chain list contains the exact amount
# of elements increase the counter
if len_chain_set == chain_length:
chain_counter += 1
return chain_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution()}")
|
0
|
0
|
0.0
|
15
|
0
|
0
|
1.0
|
0
|
14
|
[] | 8,579 |
platformio/debug/process/client.py
|
franzbischoff/platformio-core
| 4,744 |
10165323
|
# Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
import signal
import tempfile
from platformio import fs, proc
from platformio.cache import ContentCache
from platformio.compat import IS_WINDOWS, hashlib_encode_data
from platformio.debug.process.base import DebugBaseProcess
from platformio.debug.process.server import DebugServerProcess
from platformio.project.helpers import get_project_cache_dir
class DebugClientProcess(DebugBaseProcess):
def __init__(self, project_dir, debug_config):
super(DebugClientProcess, self).__init__()
self.project_dir = project_dir
self.debug_config = debug_config
self._server_process = None
self._session_id = None
if not os.path.isdir(get_project_cache_dir()):
os.makedirs(get_project_cache_dir())
self.working_dir = tempfile.mkdtemp(
dir=get_project_cache_dir(), prefix=".piodebug-"
)
self._target_is_running = False
self._errors_buffer = b""
async def run(self):
session_hash = (
self.debug_config.client_executable_path + self.debug_config.program_path
)
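        # Hash the client executable and program paths into a session id so a stale session for the same target can be found and killed.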
self._session_id = hashlib.sha1(hashlib_encode_data(session_hash)).hexdigest()
self._kill_previous_session()
if self.debug_config.server:
self._server_process = DebugServerProcess(self.debug_config)
self.debug_config.port = await self._server_process.run()
def connection_made(self, transport):
super(DebugClientProcess, self).connection_made(transport)
self._lock_session(transport.get_pid())
# Disable SIGINT and allow GDB's Ctrl+C interrupt
signal.signal(signal.SIGINT, lambda *args, **kwargs: None)
self.connect_stdin_pipe()
def process_exited(self):
if self._server_process:
self._server_process.terminate()
super(DebugClientProcess, self).process_exited()
def _kill_previous_session(self):
assert self._session_id
pid = None
with ContentCache() as cc:
pid = cc.get(self._session_id)
cc.delete(self._session_id)
if not pid:
return
if IS_WINDOWS:
kill = ["Taskkill", "/PID", pid, "/F"]
else:
kill = ["kill", pid]
try:
proc.exec_command(kill)
except: # pylint: disable=bare-except
pass
def _lock_session(self, pid):
if not self._session_id:
return
with ContentCache() as cc:
cc.set(self._session_id, str(pid), "1h")
def _unlock_session(self):
if not self._session_id:
return
with ContentCache() as cc:
cc.delete(self._session_id)
def __del__(self):
self._unlock_session()
if self.working_dir and os.path.isdir(self.working_dir):
fs.rmtree(self.working_dir)
| null | null | null | null | null | null | null | null | null |
[] | 11,879 |
jumpy/jumpy/ndarray.py
|
rghwer/testdocs
| 13,006 |
6072
|
################################################################################
# Copyright (c) 2015-2018 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
from .java_classes import *
import numpy as np
import ctypes
import warnings
native_ops = NativeOpsHolder.getInstance().getDeviceNativeOps()
# DATA TYPE MANAGEMENT
DOUBLE = DataType.DOUBLE
FLOAT = DataType.FLOAT
HALF = DataType.HALF
LONG = DataType.LONG
INT = DataType.INT
SHORT = DataType.SHORT
UBYTE = DataType.UBYTE
BYTE = DataType.BYTE
BOOL = DataType.BOOL
UTF8 = DataType.UTF8
COMPRESSED = DataType.COMPRESSED
UNKNOWN = DataType.UNKNOWN
SUPPORTED_JAVA_DTYPES = [
DOUBLE,
FLOAT,
HALF,
LONG,
INT,
SHORT,
BOOL
#UTF8
]
SUPPORTED_PYTHON_DTYPES = [
np.float64,
np.float32,
np.float16,
np.int64,
np.int32,
np.int16,
np.bool_
#np.str_
]
_PY2J = {SUPPORTED_PYTHON_DTYPES[i] : SUPPORTED_JAVA_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
_J2PY = {SUPPORTED_JAVA_DTYPES[i] : SUPPORTED_PYTHON_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
def _dtype_py2j(dtype):
if isinstance(dtype, str):
dtype = np.dtype(dtype).type
elif isinstance(dtype, np.dtype):
dtype = dtype.type
jtype = _PY2J.get(dtype)
if jtype is None:
raise NotImplementedError("Unsupported type: " + dtype.name)
return jtype
def _dtype_j2py(dtype):
pytype = _J2PY.get(dtype)
if pytype is None:
raise NotImplementedError("Unsupported type: " + (str(dtype)))
return pytype
def set_context_dtype(dtype):
'''
Sets the dtype for nd4j
# Arguments
dtype: 'float' or 'double'
'''
dtype_map = {
'float32': 'float',
'float64': 'double'
}
dtype = dtype_map.get(dtype, dtype)
if dtype not in ['float', 'double']:
raise ValueError("Invalid dtype '{}'. Available dtypes are 'float' and 'double'.".format(dtype))
dtype_ = DataTypeUtil.getDtypeFromContext(dtype)
DataTypeUtil.setDTypeForContext(dtype_)
if get_context_dtype() != dtype:
warnings.warn("Can not set context dtype now. Set it at the beginning of your program.")
def get_context_dtype():
'''
Returns the nd4j dtype
'''
dtype = DataTypeUtil.getDtypeFromContext()
return DataTypeUtil.getDTypeForName(dtype)
_refs = []
def _from_numpy(np_array):
'''
Convert numpy array to nd4j array
'''
pointer_address, _ = np_array.__array_interface__['data']
_refs.append(np_array)
pointer = native_ops.pointerForAddress(pointer_address)
size = np_array.size
pointer.limit(size)
jdtype = _dtype_py2j(np_array.dtype)
'''
mapping = {
DOUBLE: DoublePointer,
FLOAT: FloatPointer,
HALF: HalfPointer,
LONG: LongPointer,
INT: IntPointer,
SHORT: ShortPointer,
BOOL: BoolPointer
}
pc = mapping[jdtype]
#pointer = pc(pointer)
'''
buff = Nd4j.createBuffer(pointer, size, jdtype)
assert buff.address() == pointer_address
_refs.append(buff)
elem_size = buff.getElementSize()
assert elem_size == np_array.dtype.itemsize
strides = np_array.strides
strides = [dim / elem_size for dim in strides]
shape = np_array.shape
nd4j_array = Nd4j.create(buff, shape, strides, 0)
assert buff.address() == nd4j_array.data().address()
return nd4j_array
def _to_numpy(nd4j_array):
'''
Convert nd4j array to numpy array
'''
buff = nd4j_array.data()
address = buff.pointer().address()
dtype = nd4j_array.dataType().toString()
mapping = {
'DOUBLE': ctypes.c_double,
'FLOAT': ctypes.c_float,
'HALF': ctypes.c_short,
'LONG': ctypes.c_long,
'INT': ctypes.c_int,
'SHORT': ctypes.c_short,
'BOOL': ctypes.c_bool
}
Pointer = ctypes.POINTER(mapping[dtype])
pointer = ctypes.cast(address, Pointer)
np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape()))
return np_array
def _indarray(x):
typ = type(x)
if typ is INDArray:
return x
elif typ is ndarray:
return x.array
elif 'numpy' in str(typ):
return _from_numpy(x)
elif typ in (list, tuple):
return _from_numpy(np.array(x))
elif typ in (int, float):
return Nd4j.scalar(x)
else:
raise Exception('Data type not understood :' + str(typ))
def _nparray(x):
typ = type(x)
if typ is INDArray:
return ndarray(x).numpy()
elif typ is ndarray:
return x.numpy()
elif 'numpy' in str(typ):
return x
elif typ in (list, tuple):
return np.array(x)
elif typ in (int, float):
return np.array(x)
else:
raise Exception('Data type not understood :' + str(typ))
def broadcast_like(y, x):
xs = x.shape()
ys = y.shape()
if xs == ys:
return y
_xs = tuple(xs)
_ys = tuple(ys)
nx = len(xs)
ny = len(ys)
if nx > ny:
diff = nx - ny
ys = ([1] * diff) + ys
y = y.reshape(ys)
ny = nx
elif ny > nx:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
yt = []
rep_y = False
for xd, yd in zip(xs, ys):
if xd == yd:
yt.append(1)
elif xd == 1:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
elif yd == 1:
yt.append(xd)
rep_y = True
else:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
if rep_y:
y = y.repmat(*yt)
return y
def broadcast(x, y):
xs = x.shape()
ys = y.shape()
if xs == ys:
return x, y
_xs = tuple(xs)
_ys = tuple(ys)
nx = len(xs)
ny = len(ys)
if nx > ny:
diff = nx - ny
ys = ([1] * diff) + ys
y = y.reshape(*ys)
ny = nx
elif ny > nx:
diff = ny - nx
xs = ([1] * diff) + xs
x = x.reshape(*xs)
nx = ny
xt = []
yt = []
rep_x = False
rep_y = False
for xd, yd in zip(xs, ys):
if xd == yd:
xt.append(1)
yt.append(1)
elif xd == 1:
xt.append(yd)
yt.append(1)
rep_x = True
elif yd == 1:
xt.append(1)
yt.append(xd)
rep_y = True
else:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
if rep_x:
x = Nd4j.tile(x, *xt)
if rep_y:
try:
y = Nd4j.tile(y, *yt)
except:
y = Nd4j.tile(y, *yt)
return x, y
class ndarray(object):
def __init__(self, data, dtype=None):
# we ignore dtype for now
typ = type(data)
if 'nd4j' in typ.__name__:
# Note that we don't make a copy here
self.array = data
elif typ is ndarray:
self.array = data.array.dup()
else:
if typ is not np.ndarray:
data = np.array(data)
self.array = _from_numpy(data)
def numpy(self):
try:
return self.np_array
except AttributeError:
self.np_array = _to_numpy(self.array)
return self.np_array
@property
def size(self):
return self.array.length()
@property
def shape(self):
return tuple(self.array.shape())
@shape.setter
def shape(self, value):
arr = self.reshape(value)
self.array = arr.array
@property
def ndim(self):
return len(self.array.shape())
def __getitem__(self, key):
return ndarray(self.numpy()[key])
if type(key) is int:
return ndarray(self.array.get(NDArrayIndex.point(key)))
if type(key) is slice:
start = key.start
stop = key.stop
step = key.step
if start is None:
start = 0
if stop is None:
shape = self.array.shape()
if shape[0] == 1:
stop = shape[1]
else:
stop = shape[0]
if stop - start <= 0:
return None
if step is None or step == 1:
return ndarray(self.array.get(NDArrayIndex.interval(start, stop)))
else:
return ndarray(self.array.get(NDArrayIndex.interval(start, step, stop)))
if type(key) is list:
raise NotImplementedError(
'Sorry, this type of indexing is not supported yet.')
if type(key) is tuple:
key = list(key)
shape = self.array.shape()
ndim = len(shape)
nk = len(key)
key += [slice(None)] * (ndim - nk)
args = []
for i, dim in enumerate(key):
if type(dim) is int:
args.append(NDArrayIndex.point(dim))
elif type(dim) is slice:
if dim == slice(None):
args.append(NDArrayIndex.all())
else:
start = dim.start
stop = dim.stop
step = dim.step
if start is None:
start = 0
if stop is None:
stop = shape[i]
if stop - start <= 0:
return None
if step is None or step == 1:
args.append(NDArrayIndex.interval(start, stop))
else:
args.append(NDArrayIndex.interval(
start, step, stop))
elif type(dim) in (list, tuple):
raise NotImplementedError(
'Sorry, this type of indexing is not supported yet.')
return ndarray(self.array.get(*args))
def __setitem__(self, key, other):
self.numpy()[key] = _nparray(other)
return
other = _indarray(other)
view = self[key]
if view is None:
return
view = view.array
other = broadcast_like(other, view)
view.assign(other)
def __add__(self, other):
return ndarray(self.numpy() + _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.add(y))
def __sub__(self, other):
return ndarray(self.numpy() - _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.sub(y))
def __mul__(self, other):
return ndarray(self.numpy() * _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.mul(y))
def __div__(self, other):
return ndarray(self.numpy() / _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.div(y))
def __pow__(self, other):
return ndarray(self.numpy() ** _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(Transforms.pow(x, y))
def __iadd__(self, other):
self.numpy().__iadd__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.addi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.add(y)
return self
def __isub__(self, other):
self.numpy().__isub__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.subi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.sub(y)
return self
def __imul__(self, other):
self.numpy().__imul__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.muli(other)
else:
x, y = broadcast(self.array, other)
self.array = x.mul(y)
return self
def __idiv__(self, other):
self.numpy().__idiv__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.divi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.div(y)
return self
def __ipow__(self, other):
self.numpy().__ipow__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.divi(other)
else:
x, y = broadcast(self.array, other)
self.array = Transforms.pow(x, y)
return self
def __getattr__(self, attr):
import ops
f = getattr(ops, attr)
setattr(ndarray, attr, f)
return getattr(self, attr)
def __int__(self):
if self.array.length() == 1:
return self.array.getInt(0)
raise Exception('Applicable only for scalars')
def __float__(self):
if self.array.length() == 1:
return self.array.getDouble(0)
raise Exception('Applicable only for scalars')
@property
def T(self):
return self.transpose()
def array(*args, **kwargs):
return ndarray(*args, **kwargs)
|
29
|
12
|
0.0
|
112
|
0
|
17
|
1.0
|
0
|
79
|
[
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 18,
"message": "Import only needed names or import the module and then use its members.",
"textRange": {
"endLine": 18,
"endOffset": 27,
"startLine": 18,
"startOffset": 0
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 175,
"message": "Rename this local variable \"Pointer\" to match the regular expression ^[_a-z][a-z0-9_]*$.",
"textRange": {
"endLine": 175,
"endOffset": 11,
"startLine": 175,
"startOffset": 4
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 194,
"message": "Replace this generic exception class with a more specific one.",
"textRange": {
"endLine": 194,
"endOffset": 64,
"startLine": 194,
"startOffset": 14
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 210,
"message": "Replace this generic exception class with a more specific one.",
"textRange": {
"endLine": 210,
"endOffset": 64,
"startLine": 210,
"startOffset": 14
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 226,
"message": "Remove this assignment to local variable 'ny'; the value is never used.",
"textRange": {
"endLine": 226,
"endOffset": 15,
"startLine": 226,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 228,
"message": "Define a constant instead of duplicating this literal 'Unable to broadcast shapes ' 4 times.",
"textRange": {
"endLine": 228,
"endOffset": 53,
"startLine": 228,
"startOffset": 24
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 228,
"message": "Replace this generic exception class with a more specific one.",
"textRange": {
"endLine": 229,
"endOffset": 43,
"startLine": 228,
"startOffset": 14
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 228,
"message": "Define a constant instead of duplicating this literal '' 4 times.",
"textRange": {
"endLine": 229,
"endOffset": 31,
"startLine": 228,
"startOffset": 67
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 236,
"message": "Replace this generic exception class with a more specific one.",
"textRange": {
"endLine": 237,
"endOffset": 47,
"startLine": 236,
"startOffset": 18
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 242,
"message": "Either merge this branch with the identical one on line \"236\" or change one of the implementations.",
"textRange": {
"endLine": 243,
"endOffset": 47,
"startLine": 242,
"startOffset": 12
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 242,
"message": "Replace this generic exception class with a more specific one.",
"textRange": {
"endLine": 243,
"endOffset": 47,
"startLine": 242,
"startOffset": 18
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 285,
"message": "Replace this generic exception class with a more specific one.",
"textRange": {
"endLine": 286,
"endOffset": 47,
"startLine": 285,
"startOffset": 18
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 292,
"message": "Specify an exception class to catch or reraise the exception",
"textRange": {
"endLine": 292,
"endOffset": 14,
"startLine": 292,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 336,
"message": "Refactor this function to reduce its Cognitive Complexity from 52 to the 15 allowed.",
"textRange": {
"endLine": 336,
"endOffset": 19,
"startLine": 336,
"startOffset": 8
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 338,
"message": "Delete this unreachable code or refactor the code to make it reachable.",
"textRange": {
"endLine": 338,
"endOffset": 27,
"startLine": 338,
"startOffset": 11
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 397,
"message": "Delete this unreachable code or refactor the code to make it reachable.",
"textRange": {
"endLine": 399,
"endOffset": 23,
"startLine": 397,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 407,
"message": "Delete this unreachable code or refactor the code to make it reachable.",
"textRange": {
"endLine": 409,
"endOffset": 32,
"startLine": 407,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 413,
"message": "Delete this unreachable code or refactor the code to make it reachable.",
"textRange": {
"endLine": 415,
"endOffset": 32,
"startLine": 413,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 419,
"message": "Delete this unreachable code or refactor the code to make it reachable.",
"textRange": {
"endLine": 421,
"endOffset": 32,
"startLine": 419,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 425,
"message": "Delete this unreachable code or refactor the code to make it reachable.",
"textRange": {
"endLine": 427,
"endOffset": 32,
"startLine": 425,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 431,
"message": "Delete this unreachable code or refactor the code to make it reachable.",
"textRange": {
"endLine": 433,
"endOffset": 44,
"startLine": 431,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 438,
"message": "Delete this unreachable code or refactor the code to make it reachable.",
"textRange": {
"endLine": 439,
"endOffset": 46,
"startLine": 438,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 449,
"message": "Delete this unreachable code or refactor the code to make it reachable.",
"textRange": {
"endLine": 450,
"endOffset": 46,
"startLine": 449,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 460,
"message": "Delete this unreachable code or refactor the code to make it reachable.",
"textRange": {
"endLine": 461,
"endOffset": 46,
"startLine": 460,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 471,
"message": "Delete this unreachable code or refactor the code to make it reachable.",
"textRange": {
"endLine": 472,
"endOffset": 46,
"startLine": 471,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "RELIABILITY"
}
],
"line": 482,
"message": "Delete this unreachable code or refactor the code to make it reachable.",
"textRange": {
"endLine": 483,
"endOffset": 46,
"startLine": 482,
"startOffset": 8
},
"type": "BUG"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 499,
"message": "Replace this generic exception class with a more specific one.",
"textRange": {
"endLine": 499,
"endOffset": 54,
"startLine": 499,
"startOffset": 14
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "MEDIUM",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 504,
"message": "Replace this generic exception class with a more specific one.",
"textRange": {
"endLine": 504,
"endOffset": 54,
"startLine": 504,
"startOffset": 14
},
"type": "CODE_SMELL"
},
{
"impacts": [
{
"severity": "LOW",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 507,
"message": "Rename method \"T\" to match the regular expression ^[a-z_][a-z0-9_]*$.",
"textRange": {
"endLine": 507,
"endOffset": 9,
"startLine": 507,
"startOffset": 8
},
"type": "CODE_SMELL"
}
] | 56 |
maint/test/cython/cythonapp_test.py
|
DengJackNo1/tornado
| 15,056 |
10101175
|
<filename>maint/test/cython/cythonapp_test.py
from tornado.testing import AsyncTestCase, gen_test
from tornado.util import ArgReplacer
import unittest
import cythonapp
class CythonCoroutineTest(AsyncTestCase):
@gen_test
def test_native_coroutine(self):
x = yield cythonapp.native_coroutine()
self.assertEqual(x, "goodbye")
@gen_test
def test_decorated_coroutine(self):
x = yield cythonapp.decorated_coroutine()
self.assertEqual(x, "goodbye")
class CythonArgReplacerTest(unittest.TestCase):
def test_arg_replacer_function(self):
replacer = ArgReplacer(cythonapp.function_with_args, 'two')
args = (1, 'old', 3)
kwargs = {}
self.assertEqual(replacer.get_old_value(args, kwargs), 'old')
self.assertEqual(replacer.replace('new', args, kwargs),
('old', [1, 'new', 3], {}))
def test_arg_replacer_method(self):
replacer = ArgReplacer(cythonapp.AClass().method_with_args, 'two')
args = (1, 'old', 3)
kwargs = {}
self.assertEqual(replacer.get_old_value(args, kwargs), 'old')
self.assertEqual(replacer.replace('new', args, kwargs),
('old', [1, 'new', 3], {}))
| null | null | null | null | null | null | null | null | null |
[] | 11,617 |
tests/integration/optimizers/test_optimizer_pod_choice.py
|
Rohitpandit021/jina
| 15,179 |
114236
|
import os
import pytest
from jina import Document
from jina.optimizers import FlowOptimizer, EvaluationCallback
from jina.optimizers.flow_runner import SingleFlowRunner
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def config(tmpdir):
os.environ['JINA_OPTIMIZER_WORKSPACE_DIR'] = str(tmpdir)
os.environ['JINA_OPTIMIZER_PARAMETER_FILE'] = os.path.join(cur_dir, 'parameter.yml')
os.environ['JINA_OPTIMIZER_DATA_FILE'] = os.path.join(cur_dir, 'data.jsonlines')
yield
del os.environ['JINA_OPTIMIZER_WORKSPACE_DIR']
del os.environ['JINA_OPTIMIZER_PARAMETER_FILE']
del os.environ['JINA_OPTIMIZER_DATA_FILE']
def document_generator_option1(num_doc):
for _ in range(num_doc):
doc = Document(content='DummyCrafterOption1')
groundtruth_doc = Document(content='hello')
yield doc, groundtruth_doc
def document_generator_option2(num_doc):
for _ in range(num_doc):
doc = Document(content='DummyCrafterOption2')
groundtruth_doc = Document(content='hello')
yield doc, groundtruth_doc
def test_optimizer_single_flow_option1(tmpdir, config):
eval_flow_runner = SingleFlowRunner(
flow_yaml=os.path.join(cur_dir, 'flow_pod_choice.yml'),
documents=document_generator_option1(10),
request_size=1,
execution_endpoint='search',
)
opt = FlowOptimizer(
flow_runner=eval_flow_runner,
parameter_yaml=os.path.join(cur_dir, 'parameter_pod_choice.yml'),
evaluation_callback=EvaluationCallback(),
workspace_base_dir=str(tmpdir),
n_trials=10,
)
result = opt.optimize_flow()
assert (
result.best_parameters['JINA_DUMMYCRAFTER_CHOICE'] == 'pods/craft_option1.yml'
)
assert result.best_parameters['JINA_DUMMYCRAFTER_PARAM1'] == 0
assert result.best_parameters['JINA_DUMMYCRAFTER_PARAM2'] == 1
assert result.best_parameters['JINA_DUMMYCRAFTER_PARAM3'] == 1
def test_optimizer_single_flow_option2(tmpdir, config):
eval_flow_runner = SingleFlowRunner(
flow_yaml=os.path.join(cur_dir, 'flow_pod_choice.yml'),
documents=document_generator_option2(10),
request_size=1,
execution_endpoint='search',
)
opt = FlowOptimizer(
flow_runner=eval_flow_runner,
parameter_yaml=os.path.join(cur_dir, 'parameter_pod_choice.yml'),
evaluation_callback=EvaluationCallback(),
workspace_base_dir=str(tmpdir),
n_trials=20,
)
result = opt.optimize_flow()
assert (
result.best_parameters['JINA_DUMMYCRAFTER_CHOICE'] == 'pods/craft_option2.yml'
)
assert result.best_parameters['JINA_DUMMYCRAFTER_PARAM4'] == 0
assert result.best_parameters['JINA_DUMMYCRAFTER_PARAM5'] == 1
assert result.best_parameters['JINA_DUMMYCRAFTER_PARAM6'] == 1
|
0
|
0
|
0.0
|
2
|
0
|
0
|
1.0
|
0
|
7
|
[] | 699 |
tests/components/speedtestdotnet/test_config_flow.py
|
MrDelik/core
| 30,023 |
10198206
|
<filename>tests/components/speedtestdotnet/test_config_flow.py
"""Tests for SpeedTest config flow."""
from datetime import timedelta
from unittest.mock import MagicMock
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import speedtestdotnet
from homeassistant.components.speedtestdotnet.const import (
CONF_MANUAL,
CONF_SERVER_ID,
CONF_SERVER_NAME,
DOMAIN,
)
from homeassistant.const import CONF_SCAN_INTERVAL
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
async def test_flow_works(hass: HomeAssistant) -> None:
"""Test user config."""
result = await hass.config_entries.flow.async_init(
speedtestdotnet.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_options(hass: HomeAssistant, mock_api: MagicMock) -> None:
"""Test updating options."""
entry = MockConfigEntry(
domain=DOMAIN,
title="SpeedTest",
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_SERVER_NAME: "Country1 - Sponsor1 - Server1",
CONF_SCAN_INTERVAL: 30,
CONF_MANUAL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_SERVER_NAME: "Country1 - Sponsor1 - Server1",
CONF_SERVER_ID: "1",
CONF_SCAN_INTERVAL: 30,
CONF_MANUAL: True,
}
await hass.async_block_till_done()
assert hass.data[DOMAIN].update_interval is None
# test setting server name to "*Auto Detect"
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_SERVER_NAME: "*Auto Detect",
CONF_SCAN_INTERVAL: 30,
CONF_MANUAL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_SERVER_NAME: "*Auto Detect",
CONF_SERVER_ID: None,
CONF_SCAN_INTERVAL: 30,
CONF_MANUAL: True,
}
# test setting the option to update periodically
result2 = await hass.config_entries.options.async_init(entry.entry_id)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "init"
result2 = await hass.config_entries.options.async_configure(
result2["flow_id"],
user_input={
CONF_SERVER_NAME: "Country1 - Sponsor1 - Server1",
CONF_SCAN_INTERVAL: 30,
CONF_MANUAL: False,
},
)
await hass.async_block_till_done()
assert hass.data[DOMAIN].update_interval == timedelta(minutes=30)
async def test_integration_already_configured(hass: HomeAssistant) -> None:
"""Test integration is already configured."""
entry = MockConfigEntry(
domain=DOMAIN,
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
speedtestdotnet.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
| null | null | null | null | null | null | null | null | null |
[] | 12,071 |
youtube_dl/extractor/iqiyi.py
|
hackarada/youtube-dl
| 66,635 |
3349350
|
# coding: utf-8
from __future__ import unicode_literals
import hashlib
import itertools
import re
import time
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_urlencode,
)
from ..utils import (
clean_html,
decode_packed_codes,
get_element_by_id,
get_element_by_attribute,
ExtractorError,
ohdave_rsa_encrypt,
remove_start,
)
def md5_text(text):
return hashlib.md5(text.encode('utf-8')).hexdigest()
class IqiyiSDK(object):
def __init__(self, target, ip, timestamp):
self.target = target
self.ip = ip
self.timestamp = timestamp
@staticmethod
def split_sum(data):
return compat_str(sum(map(lambda p: int(p, 16), list(data))))
@staticmethod
def digit_sum(num):
if isinstance(num, int):
num = compat_str(num)
return compat_str(sum(map(int, num)))
def even_odd(self):
even = self.digit_sum(compat_str(self.timestamp)[::2])
odd = self.digit_sum(compat_str(self.timestamp)[1::2])
return even, odd
def preprocess(self, chunksize):
self.target = md5_text(self.target)
chunks = []
for i in range(32 // chunksize):
chunks.append(self.target[chunksize * i:chunksize * (i + 1)])
if 32 % chunksize:
chunks.append(self.target[32 - 32 % chunksize:])
return chunks, list(map(int, self.ip.split('.')))
def mod(self, modulus):
chunks, ip = self.preprocess(32)
self.target = chunks[0] + ''.join(map(lambda p: compat_str(p % modulus), ip))
def split(self, chunksize):
modulus_map = {
4: 256,
5: 10,
8: 100,
}
chunks, ip = self.preprocess(chunksize)
ret = ''
for i in range(len(chunks)):
ip_part = compat_str(ip[i] % modulus_map[chunksize]) if i < 4 else ''
if chunksize == 8:
ret += ip_part + chunks[i]
else:
ret += chunks[i] + ip_part
self.target = ret
def handle_input16(self):
self.target = md5_text(self.target)
self.target = self.split_sum(self.target[:16]) + self.target + self.split_sum(self.target[16:])
def handle_input8(self):
self.target = md5_text(self.target)
ret = ''
for i in range(4):
part = self.target[8 * i:8 * (i + 1)]
ret += self.split_sum(part) + part
self.target = ret
def handleSum(self):
self.target = md5_text(self.target)
self.target = self.split_sum(self.target) + self.target
def date(self, scheme):
self.target = md5_text(self.target)
d = time.localtime(self.timestamp)
strings = {
'y': compat_str(d.tm_year),
'm': '%02d' % d.tm_mon,
'd': '%02d' % d.tm_mday,
}
self.target += ''.join(map(lambda c: strings[c], list(scheme)))
def split_time_even_odd(self):
even, odd = self.even_odd()
self.target = odd + md5_text(self.target) + even
def split_time_odd_even(self):
even, odd = self.even_odd()
self.target = even + md5_text(self.target) + odd
def split_ip_time_sum(self):
chunks, ip = self.preprocess(32)
self.target = compat_str(sum(ip)) + chunks[0] + self.digit_sum(self.timestamp)
def split_time_ip_sum(self):
chunks, ip = self.preprocess(32)
self.target = self.digit_sum(self.timestamp) + chunks[0] + compat_str(sum(ip))
class IqiyiSDKInterpreter(object):
def __init__(self, sdk_code):
self.sdk_code = sdk_code
def run(self, target, ip, timestamp):
self.sdk_code = decode_packed_codes(self.sdk_code)
functions = re.findall(r'input=([a-zA-Z0-9]+)\(input', self.sdk_code)
sdk = IqiyiSDK(target, ip, timestamp)
other_functions = {
'handleSum': sdk.handleSum,
'handleInput8': sdk.handle_input8,
'handleInput16': sdk.handle_input16,
'splitTimeEvenOdd': sdk.split_time_even_odd,
'splitTimeOddEven': sdk.split_time_odd_even,
'splitIpTimeSum': sdk.split_ip_time_sum,
'splitTimeIpSum': sdk.split_time_ip_sum,
}
for function in functions:
if re.match(r'mod\d+', function):
sdk.mod(int(function[3:]))
elif re.match(r'date[ymd]{3}', function):
sdk.date(function[4:])
elif re.match(r'split\d+', function):
sdk.split(int(function[5:]))
elif function in other_functions:
other_functions[function]()
else:
raise ExtractorError('Unknown function %s' % function)
return sdk.target
class IqiyiIE(InfoExtractor):
IE_NAME = 'iqiyi'
IE_DESC = '爱奇艺'
_VALID_URL = r'https?://(?:(?:[^.]+\.)?iqiyi\.com|www\.pps\.tv)/.+\.html'
_NETRC_MACHINE = 'iqiyi'
_TESTS = [{
'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
# MD5 checksum differs on my machine and Travis CI
'info_dict': {
'id': '9c1fb1b99d192b21c559e5a1a2cb3c73',
'ext': 'mp4',
'title': '美国德州空中惊现奇异云团 酷似UFO',
}
}, {
'url': 'http://www.iqiyi.com/v_19rrhnnclk.html',
'md5': 'b7dc800a4004b1b57749d9abae0472da',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb',
'ext': 'mp4',
# This can be either Simplified Chinese or Traditional Chinese
'title': r're:^(?:名侦探柯南 国语版:第752集 迫近灰原秘密的黑影 下篇|名偵探柯南 國語版:第752集 迫近灰原秘密的黑影 下篇)$',
},
'skip': 'Geo-restricted to China',
}, {
'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html',
'only_matching': True,
}, {
'url': 'http://www.iqiyi.com/a_19rrhbc6kt.html',
'only_matching': True,
}, {
'url': 'http://yule.iqiyi.com/pcb.html',
'info_dict': {
'id': '4a0af228fddb55ec96398a364248ed7f',
'ext': 'mp4',
'title': '第2017-04-21期 女艺人频遭极端粉丝骚扰',
},
}, {
# VIP-only video. The first 2 parts (6 minutes) are available without login
# MD5 sums omitted as values are different on Travis CI and my machine
'url': 'http://www.iqiyi.com/v_19rrny4w8w.html',
'info_dict': {
'id': 'f3cf468b39dddb30d676f89a91200dc1',
'ext': 'mp4',
'title': '泰坦尼克号',
},
'skip': 'Geo-restricted to China',
}, {
'url': 'http://www.iqiyi.com/a_19rrhb8ce1.html',
'info_dict': {
'id': '202918101',
'title': '灌篮高手 国语版',
},
'playlist_count': 101,
}, {
'url': 'http://www.pps.tv/w_19rrbav0ph.html',
'only_matching': True,
}]
_FORMATS_MAP = {
'96': 1, # 216p, 240p
'1': 2, # 336p, 360p
'2': 3, # 480p, 504p
'21': 4, # 504p
'4': 5, # 720p
'17': 5, # 720p
'5': 6, # 1072p, 1080p
'18': 7, # 1080p
}
def _real_initialize(self):
self._login()
@staticmethod
def _rsa_fun(data):
# public key extracted from http://static.iqiyi.com/js/qiyiV2/20160129180840/jobs/i18n/i18nIndex.js
N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
e = 65537
return ohdave_rsa_encrypt(data, e, N)
def _login(self):
username, password = self._get_login_info()
# No authentication to be performed
if not username:
return True
data = self._download_json(
'http://kylin.iqiyi.com/get_token', None,
note='Get token for logging', errnote='Unable to get token for logging')
sdk = data['sdk']
timestamp = int(time.time())
target = '/apis/reglogin/login.action?lang=zh_TW&area_code=null&email=%s&passwd=%s&agenttype=1&from=undefined&keeplogin=0&piccode=&fromurl=&_pos=1' % (
username, self._rsa_fun(password.encode('utf-8')))
interp = IqiyiSDKInterpreter(sdk)
sign = interp.run(target, data['ip'], timestamp)
validation_params = {
'target': target,
'server': 'BEA3AA1908656AABCCFF76582C4C6660',
'token': data['token'],
'bird_src': 'f8d91d57af224da7893dd397d52d811a',
'sign': sign,
'bird_t': timestamp,
}
validation_result = self._download_json(
'http://kylin.iqiyi.com/validate?' + compat_urllib_parse_urlencode(validation_params), None,
note='Validate credentials', errnote='Unable to validate credentials')
MSG_MAP = {
'P00107': 'please login via the web interface and enter the CAPTCHA code',
'P00117': 'bad username or password',
}
code = validation_result['code']
if code != 'A00000':
msg = MSG_MAP.get(code)
if not msg:
msg = 'error %s' % code
if validation_result.get('msg'):
msg += ': ' + validation_result['msg']
self._downloader.report_warning('unable to log in: ' + msg)
return False
return True
def get_raw_data(self, tvid, video_id):
tm = int(time.time() * 1000)
key = 'd5fb4bd9d50c4be6948c97edd7254b0e'
sc = md5_text(compat_str(tm) + key + tvid)
params = {
'tvid': tvid,
'vid': video_id,
'src': '76f90cbd92f94a2e925d83e8ccd22cb7',
'sc': sc,
't': tm,
}
return self._download_json(
'http://cache.m.iqiyi.com/jp/tmts/%s/%s/' % (tvid, video_id),
video_id, transform_source=lambda s: remove_start(s, 'var tvInfoJs='),
query=params, headers=self.geo_verification_headers())
def _extract_playlist(self, webpage):
PAGE_SIZE = 50
links = re.findall(
r'<a[^>]+class="site-piclist_pic_link"[^>]+href="(http://www\.iqiyi\.com/.+\.html)"',
webpage)
if not links:
return
album_id = self._search_regex(
r'albumId\s*:\s*(\d+),', webpage, 'album ID')
album_title = self._search_regex(
r'data-share-title="([^"]+)"', webpage, 'album title', fatal=False)
entries = list(map(self.url_result, links))
# Start from 2 because links in the first page are already on webpage
for page_num in itertools.count(2):
pagelist_page = self._download_webpage(
'http://cache.video.qiyi.com/jp/avlist/%s/%d/%d/' % (album_id, page_num, PAGE_SIZE),
album_id,
note='Download playlist page %d' % page_num,
errnote='Failed to download playlist page %d' % page_num)
pagelist = self._parse_json(
remove_start(pagelist_page, 'var tvInfoJs='), album_id)
vlist = pagelist['data']['vlist']
for item in vlist:
entries.append(self.url_result(item['vurl']))
if len(vlist) < PAGE_SIZE:
break
return self.playlist_result(entries, album_id, album_title)
def _real_extract(self, url):
webpage = self._download_webpage(
url, 'temp_id', note='download video page')
        # There's no simple way to determine whether a URL is a playlist or not
# Sometimes there are playlist links in individual videos, so treat it
# as a single video first
tvid = self._search_regex(
r'data-(?:player|shareplattrigger)-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid', default=None)
if tvid is None:
playlist_result = self._extract_playlist(webpage)
if playlist_result:
return playlist_result
raise ExtractorError('Can\'t find any video')
video_id = self._search_regex(
r'data-(?:player|shareplattrigger)-videoid\s*=\s*[\'"]([a-f\d]+)', webpage, 'video_id')
formats = []
for _ in range(5):
raw_data = self.get_raw_data(tvid, video_id)
if raw_data['code'] != 'A00000':
if raw_data['code'] == 'A00111':
self.raise_geo_restricted()
raise ExtractorError('Unable to load data. Error code: ' + raw_data['code'])
data = raw_data['data']
for stream in data['vidl']:
if 'm3utx' not in stream:
continue
vd = compat_str(stream['vd'])
formats.append({
'url': stream['m3utx'],
'format_id': vd,
'ext': 'mp4',
'preference': self._FORMATS_MAP.get(vd, -1),
'protocol': 'm3u8_native',
})
if formats:
break
self._sleep(5, video_id)
self._sort_formats(formats)
title = (get_element_by_id('widget-videotitle', webpage)
or clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage))
or self._html_search_regex(r'<span[^>]+data-videochanged-title="word"[^>]*>([^<]+)</span>', webpage, 'title'))
return {
'id': video_id,
'title': title,
'formats': formats,
}
| null | null | null | null | null | null | null | null | null |
[] | 2,729 |
homeassistant/components/flux_led/switch.py
|
learn-home-automation/core
| 22,481 |
4824441
|
"""Support for Magic Home switches."""
from __future__ import annotations
from typing import Any
from flux_led import DeviceType
from flux_led.aio import AIOWifiLedBulb
from flux_led.const import MODE_MUSIC
from homeassistant import config_entries
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
CONF_REMOTE_ACCESS_ENABLED,
CONF_REMOTE_ACCESS_HOST,
CONF_REMOTE_ACCESS_PORT,
DOMAIN,
)
from .coordinator import FluxLedUpdateCoordinator
from .discovery import async_clear_discovery_cache
from .entity import FluxBaseEntity, FluxEntity, FluxOnOffEntity
async def async_setup_entry(
hass: HomeAssistant,
entry: config_entries.ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Flux lights."""
coordinator: FluxLedUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
entities: list[FluxSwitch | FluxRemoteAccessSwitch | FluxMusicSwitch] = []
unique_id = entry.unique_id
name = entry.data[CONF_NAME]
if coordinator.device.device_type == DeviceType.Switch:
entities.append(FluxSwitch(coordinator, unique_id, name))
if entry.data.get(CONF_REMOTE_ACCESS_HOST):
entities.append(FluxRemoteAccessSwitch(coordinator.device, entry))
if coordinator.device.microphone:
entities.append(FluxMusicSwitch(coordinator, unique_id, name))
if entities:
async_add_entities(entities)
class FluxSwitch(FluxOnOffEntity, CoordinatorEntity, SwitchEntity):
"""Representation of a Flux switch."""
async def _async_turn_on(self, **kwargs: Any) -> None:
"""Turn the device on."""
if not self.is_on:
await self._device.async_turn_on()
class FluxRemoteAccessSwitch(FluxBaseEntity, SwitchEntity):
"""Representation of a Flux remote access switch."""
_attr_should_poll = False
_attr_entity_category = EntityCategory.CONFIG
def __init__(
self,
device: AIOWifiLedBulb,
entry: config_entries.ConfigEntry,
) -> None:
"""Initialize the light."""
super().__init__(device, entry)
self._attr_name = f"{entry.data[CONF_NAME]} Remote Access"
if entry.unique_id:
self._attr_unique_id = f"{entry.unique_id}_remote_access"
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the remote access on."""
await self._device.async_enable_remote_access(
self.entry.data[CONF_REMOTE_ACCESS_HOST],
self.entry.data[CONF_REMOTE_ACCESS_PORT],
)
await self._async_update_entry(True)
async def _async_update_entry(self, new_state: bool) -> None:
"""Update the entry with the new state on success."""
async_clear_discovery_cache(self.hass, self._device.ipaddr)
self.hass.config_entries.async_update_entry(
self.entry,
data={**self.entry.data, CONF_REMOTE_ACCESS_ENABLED: new_state},
)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the remote access off."""
await self._device.async_disable_remote_access()
await self._async_update_entry(False)
@property
def is_on(self) -> bool:
"""Return true if remote access is enabled."""
return bool(self.entry.data[CONF_REMOTE_ACCESS_ENABLED])
@property
def icon(self) -> str:
"""Return icon based on state."""
return "mdi:cloud-outline" if self.is_on else "mdi:cloud-off-outline"
class FluxMusicSwitch(FluxEntity, SwitchEntity):
"""Representation of a Flux music switch."""
def __init__(
self,
coordinator: FluxLedUpdateCoordinator,
unique_id: str | None,
name: str,
) -> None:
"""Initialize the flux music switch."""
super().__init__(coordinator, unique_id, name)
self._attr_name = f"{name} Music"
if unique_id:
self._attr_unique_id = f"{unique_id}_music"
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the microphone on."""
await self._async_ensure_device_on()
await self._device.async_set_music_mode()
self.async_write_ha_state()
await self.coordinator.async_request_refresh()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the microphone off."""
await self._device.async_set_levels(*self._device.rgb, brightness=255)
self.async_write_ha_state()
await self.coordinator.async_request_refresh()
@property
def is_on(self) -> bool:
"""Return true if microphone is is on."""
return self._device.is_on and self._device.effect == MODE_MUSIC
@property
def icon(self) -> str:
"""Return icon based on state."""
return "mdi:microphone" if self.is_on else "mdi:microphone-off"
|
0
|
0
|
0.0
|
10
|
0
|
0
|
1.0
|
0
|
23
|
[] | 3,113 |
src/tools/vttests/template.py
|
Ghosty141/Terminal
| 34,359 |
188124
|
################################################################################
# #
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# #
################################################################################
import sys
import time # time.sleep is in seconds
from common import *
# Run this file with:
# python name-of-file.py
if __name__ == '__main__':
clear_all()
print('This is the VT Test template.')
|
1
|
0
|
0.0
|
1
|
0
|
1
|
1.0
|
0
|
1
|
[
{
"impacts": [
{
"severity": "HIGH",
"softwareQuality": "MAINTAINABILITY"
}
],
"line": 10,
"message": "Import only needed names or import the module and then use its members.",
"textRange": {
"endLine": 10,
"endOffset": 20,
"startLine": 10,
"startOffset": 0
},
"type": "CODE_SMELL"
}
] | 1,145 |